/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;

#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);
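  // Layout assumption, inferred from the pops below: the runtime call is
  // expected to leave the incoming register arguments materialized on the
  // stack, GP argument slots first and (on AArch64) FP argument slots
  // after them, so they can be reloaded pairwise before returning to the
  // caller of the signature handler.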
#ifdef AARCH64
  // expand expr. stack and extended SP to avoid cutting SP in call_VM
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed */);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // A few alternatives to an always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments;
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but a conditional path could
  // eliminate any gain from avoiding the 8 double-word loads.
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}
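// Returning NULL from a method-entry generator makes the interpreter fall
// back to the vanilla (zerolocals) entry, so until the TODO above is
// resolved, math methods are simply interpreted on ARM rather than
// intrinsified.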
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        // (stack grows negative)
    __ b(L, ls);        // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  InlinedString Lname(name);

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();
  __ bind_literal(Lname);

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}
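// With pass_oop, the object popped from TOS is handed to
// create_klass_exception (which uses that object to build the exception's
// message); otherwise the optional message literal goes to
// create_exception. Either way the new exception oop lands in
// Rexception_obj before branching to the common throw path above.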
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  // Not used.
  STOP("generate_continuation_for");
  return NULL;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that SP is now tos until next java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

  // The low bits of the cpCache entry flags hold the callee's parameter
  // size (in slots); use it to pop the arguments off the expression stack.
  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);

  return entry;
}
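// Result handlers translate a native method's ABI return value into what
// the interpreter expects for the given BasicType. On AArch64 a real stub
// is generated below; on 32-bit ARM the value is already in the right
// shape, so only a non-NULL marker distinguishing T_OBJECT results is
// needed.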
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
#ifdef AARCH64
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      __ tst(R0, 0xff);
      __ cset(R0, ne);
      break;
    case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
    case T_BYTE   : __ sign_extend(R0, R0,  8);  break;
    case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : /* nothing to do */          break;
    case T_OBJECT :
      // retrieve result from frame
      __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
      // and verify it
      __ verify_oop(R0);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();
  return entry;
#else
  // Result handlers are not used on 32-bit ARM
  // since the returned value is already in the appropriate format.
  __ should_not_reach_here();  // to avoid empty code block

  // A non-zero result handler indicates that an object is returned; this is
  // used in the native entry code.
  return type == T_OBJECT ? (address)(-1) : NULL;
#endif // AARCH64
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                         Label* profile_method,
                                                         Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                                           in_bytes(MethodData::invocation_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);  // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);           // add both counters
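    // The counter word keeps the count in its upper bits and status bits
    // in the low bits, which is why the backedge counter is masked with
    // count_mask_value before the two counts are summed for the limit
    // checks below.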

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call.
    // BytecodeInterpreter only calls here for native methods, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}
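// Conceptually, the check below is roughly (a sketch, names as in the code):
//
//   free_end = SP - (overhead_size + reserved_pages + guard_pages
//                    + Method::extra_stack_words())
//              - additional_locals * wordSize - max_stack * wordSize
//   if (free_end <= stack_base - stack_size)  // at or below usable bottom
//     throw StackOverflowError
//
// i.e. the frame about to be built must fit entirely above the guard area.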
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack bottom (lowest usable address) = stack base - stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

#ifdef ASSERT
  { Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);    // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);  // add space for a monitor entry
  __ check_stack_top_on_expansion();
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                               // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes())); // store object
  __ mov(R1, Rstack_top);                      // monitor entry address
  __ lock_object(R1);
}
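// A monitor entry is a BasicObjectLock: a BasicLock (the displaced mark
// word) followed by the object reference, which is why lock_method stores
// the synchronization object at BasicObjectLock::obj_offset_in_bytes()
// before handing the entry's address to lock_object.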
#ifdef AARCH64

//
// Generate a fixed interpreter frame. This sets up the frame identically
// for interpreted methods and for native methods, hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack    = R11;
  // Temporary registers
  const Register RextendedSP  = R0;
  const Register Rcache       = R1;
  const Register Rmdp         = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);  // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);  // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset()));  // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes));  // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,     Address(FP, -10*wordSize));
  __ stp(Rlocals, Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp, Rtemp,          Address(FP,  -6*wordSize));
  __ stp(Rmethod, RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR, Rsender_sp,       Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset  ==  -2, "stack top broken");
}

#else // AARCH64
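// On 32-bit ARM the same frame is built incrementally with push()es, in
// reverse of the pictured layout order, instead of the paired stp stores
// used on AArch64.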

//
// Generate a fixed interpreter frame. This sets up the frame identically
// for interpreted methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);      // save return address
  __ push(FP);      // save FP
  __ mov(FP, SP);   // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);      // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));  // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset());         // get codebase
  }

  __ push(Rmethod); // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp); // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);   // set constant pool cache
  __ push(Rlocals); // set locals pointer
  __ push(Rbcp);    // set bcp
  __ push(R0);      // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0)); // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions
// but the frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds:
//
// Arguments:
//
//   Rmethod: Method*
//   Rthread: thread
//   Rsender_sp: sender sp
//   Rparams (SP on 32-bit ARM): pointer to method parameters
//
//   LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*) ] <--- SP (AArch64)
// [ parameter n         ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1         ]
// [ expression stack    ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*)  ] <--- SP (AArch64)
// [ expr. stack         ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry       ]
//   ...
// [ monitor entry       ]
// [ expr. stack bottom  ]
// [ saved Rbcp          ]
// [ current Rlocals     ]
// [ cache               ]
// [ mdx                 ]
// [ mirror              ]
// [ Method*             ]
//
// 32-bit ARM:
// [ last_sp             ]
//
// AArch64:
// [ extended SP (*)     ]
// [ stack top (*)       ]
//
// [ sender_sp           ]
// [ saved FP            ] <--- FP
// [ saved LR            ]
// [ optional padding(*) ]
// [ local variable m    ]
//   ...
// [ local variable 1    ]
// [ parameter n         ]
//   ...
// [ parameter 1         ] <--- Rlocals
//
// (*) - AArch64 only
//
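// In Java terms, the G1 fast path generated below behaves like this sketch
// of java.lang.ref.Reference.get() with an explicit SATB pre-barrier:
//
//   T get() {
//     T ref = this.referent;   // load_heap_oop
//     g1PreBarrierLog(ref);    // record the pre-value in the SATB buffer
//     return ref;              // _areturn
//   }
//
// (g1PreBarrierLog is a made-up name for the logging performed by
// g1_write_barrier_pre below.)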
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
    //    2. The slow path - which is an expansion of the regular method entry.
    //
    // Notes:-
    // * In the G1 code we do not check whether we need to block for
    //   a safepoint. If G1 is enabled then we must execute the specialized
    //   code for Reference.get (except when the Reference object is null)
    //   so that we can log the value in the referent field with an SATB
    //   update buffer.
    //   If the code for the getfield template is modified so that the
    //   G1 pre-barrier code is executed when the current method is
    //   Reference.get() then going through the normal method entry
    //   will be fine.
    // * The G1 code can, however, check the receiver object (the instance
    //   of java.lang.Reference) and jump to the slow path if null. If the
    //   Reference object is null then we obviously cannot fetch the referent
    //   and so we don't need to call the G1 pre-barrier. Thus we can use the
    //   regular method entry code to generate the NPE.
    //
    // This code is based on generate_accessor_entry.
    //
    // Rmethod: Method*
    // Rthread: thread
    // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
    // Rparams: parameters

    address entry = __ pc();
    Label slow_path;
    const Register Rthis = R0;
    const Register Rret_addr = Rtmp_save1;
    assert_different_registers(Rthis, Rret_addr, Rsender_sp);

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(Rthis, Address(Rparams));
    __ cbz(Rthis, slow_path);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    __ load_heap_oop(R0, Address(Rthis, referent_offset));

    // Preserve LR
    __ mov(Rret_addr, LR);

    __ g1_write_barrier_pre(noreg,   // store_addr
                            noreg,   // new_val
                            R0,      // pre_val
                            Rtemp,   // tmp1
                            R1_tmp); // tmp2

    // _areturn
    __ mov(SP, Rsender_sp);
    __ ret(Rret_addr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the normal entry point
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));
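  // The slot(s) reserved below form the frame's oop_temp area. It later
  // receives the mirror for static methods (passed as the second JNI
  // argument) and the oop result across the JVMTI exit notification, so
  // GC can always find these references in the frame.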
#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }
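  // From here on: the signature handler invoked below copies the Java
  // arguments from the locals area into the native ABI locations
  // (registers and the outgoing stack area allocated next), and returns
  // the result handler address in R0, which is stashed in Rresult_handler.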
  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());
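  // Rtemp now holds the global safepoint state; it is only checked
  // (together with the per-thread suspend flags) after the return value
  // has been protected below, since check_special_condition_for_native_trans
  // may block and clobber the argument/result registers.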
  // Protect the return value in the interleaved code: save it to callee-save registers.
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
    __ mov(R0, Rthread, ne);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result: a native method returns a JNI handle, so a non-NULL
  // T_OBJECT result must be dereferenced to yield the oop itself.
#ifdef AARCH64
  {
    Label L, Lnull;
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(L, ne);
    __ cbz(Rsaved_result, Lnull);
    __ ldr(Rsaved_result, Address(Rsaved_result));
    __ bind(Lnull);
    // Store oop on the stack for GC
    __ str(Rsaved_result, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }
#else
  __ tst(Rsaved_result_lo, Rresult_handler);
  __ ldr(Rsaved_result_lo, Address(Rsaved_result_lo), ne);

  // Store oop on the stack for GC
  __ cmp(Rresult_handler, 0);
  __ str(Rsaved_result_lo, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
#endif // AARCH64

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64
  // Reguard the stack if a StackOverflowError happened while in native code.
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64
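  // Note that an oop result is reloaded from the in-frame oop_temp slot
  // rather than from the saved registers: a GC during the exit
  // notification above may have moved the object, and only the in-frame
  // copy is updated in that case.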
  // Restore the result. Oop result is restored from the stack.
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ cmp(Rresult_handler, 0);
  __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
  __ mov(R0, Rsaved_result_lo, eq);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

#ifdef ASSERT
  if (VerifyOops) {
    Label L;
    __ cmp(Rresult_handler, 0);
    __ b(L, eq);
    __ verify_oop(R0);
    __ bind(L);
  }
#endif // ASSERT
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
#endif // AARCH64
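  // Only the additional (non-parameter) locals need fresh stack space:
  // the first size_of_parameters locals overlay the incoming arguments,
  // so R3 now holds exactly the number of slots to allocate and zero
  // below.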
  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

#ifdef AARCH64

  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
#endif // AARCH64

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();

      __ b(profile_method_continue);
    }

    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

#ifndef AARCH64
  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // !AARCH64

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
#endif // AARCH64

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj); // push exception which is now the only value on the stack
  __ jump(R0);                 // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // Rbcp: exception bcp

  //
  // JVMTI PopFrame support
  //
  Interpreter::_remove_activation_preserving_args_entry = __ pc();
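  // (PopFrame discards the topmost frame and resumes the caller just before
  // the invoke that created that frame, so this entry must preserve the
  // outgoing arguments while the activation is torn down.)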

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ empty_expression_stack();

  // Set the popframe_processing bit in _popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
  __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
  __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
    __ cbnz_32(R0, caller_not_deoptimized);
#ifdef AARCH64
    __ NOT_TESTED();
#endif

    // Compute size of arguments for saving when returning to deoptimized caller
    __ restore_method();
    __ ldr(R0, Address(Rmethod, Method::const_offset()));
    __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));

    __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
    // Save these arguments
    __ restore_locals();
    __ sub(R2, Rlocals, R1);
    __ add(R2, R2, wordSize);
    __ mov(R0, Rthread);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);

    __ remove_activation(vtos, LR,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
    __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, R4,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

    __ remove_activation(vtos, LR,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
    __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, R4,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(R1, SP);
  __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ mov(R0, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
  __ reset_last_Java_frame(Rtemp);
#endif // !AARCH64

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#else
  // Restore the last_sp and null it out
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();
  __ restore_method();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  assert(JavaThread::popframe_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
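
  // Editorial note on the _popframe_condition lifecycle as it appears in
  // this function (names taken from the code above; the exact bit values
  // live in JavaThread): the entry sets popframe_processing_bit, the
  // deoptimized-caller path replaces it with
  // popframe_force_deopt_reexecution_bit, and the normal path ends here by
  // storing popframe_inactive (zero).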

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(Rtemp, Address(Rbcp, 0));
    __ cmp(Rtemp, Bytecodes::_invokestatic);
    __ b(L_done, ne);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    // get local0
    __ ldr(R1, Address(Rlocals, 0));
    __ mov(R2, Rmethod);
    __ mov(R3, Rbcp);
    __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);

    __ cbz(R0, L_done);

    __ str(R0, Address(Rstack_top));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without throwing IllegalMonitorStateExceptions)
  __ remove_activation(vtos, Rexception_pc, false, true, false);
  // restore exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call

  __ jump(R0);  // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);
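
  // Editorial note: load_earlyret_value(state) is understood to move the
  // value that the JVMTI agent supplied via ForceEarlyReturn from the
  // JvmtiThreadState into the tos register(s) appropriate for `state`
  // (an inference from its use here; see interp_masm for the definition).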

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */

#ifndef AARCH64
  // According to interpreter calling conventions, result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as it uses
  // push(state) & pop(state) to preserve return value.
  __ convert_tos_to_retval(state);
#endif // !AARCH64

  __ ret();

  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {
    // can't share atos entry with itos on AArch64 or if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
#ifdef AARCH64
  __ mov(R3, ZR);
#else
  __ mov(R3, R1_tos_hi);
#endif // AARCH64
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  //   index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes)
  // (_index >> log2_number_of_codes) is previous bytecode
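
  // Worked example of the encoding above (illustrative only; assumes
  // BytecodePairHistogram::log2_number_of_codes == 8, i.e. byte-sized
  // bytecode numbers). After executing bytecode B1 followed by B2:
  //   _index == (B2 << 8) | B1
  // so the low byte identifies the previous bytecode, the high byte the
  // current one, and each (previous, current) pair gets its own counter
  // slot in _counters.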

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);          // avoid overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
#endif // !PRODUCT
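
// Usage note (editorial): the helpers above are only generated in
// non-product builds and are driven by develop flags such as
// -XX:+CountBytecodes, -XX:+PrintBytecodeHistogram,
// -XX:+PrintBytecodePairHistogram and -XX:StopInterpreterAt=<n>.
// Only StopInterpreterAt appears in this file; the other flag names are
// assumed from the shared runtime globals - see globals.hpp for the
// authoritative list.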