/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
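// (PrintInterpreter is a diagnostic flag, so on product builds it may first
// need to be unlocked with -XX:+UnlockDiagnosticVMOptions.)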
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;

#define __ _masm->

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // adjust sp
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // rsp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}


//
// Various method entries
//

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // rmethod: Method*
  // r13: sender sp
  // esp: args

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.
  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr

  address entry_point = NULL;
  Register continuation = lr;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fabsd(v0, v0);
    __ mov(sp, r13); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fsqrtd(v0, v0);
    __ mov(sp, r13);
    break;
  case Interpreter::java_lang_math_sin :
  case Interpreter::java_lang_math_cos :
  case Interpreter::java_lang_math_tan :
  case Interpreter::java_lang_math_log :
  case Interpreter::java_lang_math_log10 :
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ mov(sp, r13);
    __ mov(r19, lr);
    continuation = r19;  // The first callee-saved register
    generate_transcendental_entry(kind, 1);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mov(r19, lr);
    continuation = r19;
    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
    __ ldrd(v1, Address(esp));
    __ mov(sp, r13);
    generate_transcendental_entry(kind, 2);
    break;
  case Interpreter::java_lang_math_fmaD :
    if (UseFMA) {
      entry_point = __ pc();
      __ ldrd(v0, Address(esp, 4 * Interpreter::stackElementSize));
      __ ldrd(v1, Address(esp, 2 * Interpreter::stackElementSize));
      __ ldrd(v2, Address(esp));
      __ fmaddd(v0, v0, v1, v2);
      __ mov(sp, r13); // Restore caller's SP
    }
    break;
  case Interpreter::java_lang_math_fmaF :
    if (UseFMA) {
      entry_point = __ pc();
      __ ldrs(v0, Address(esp, 2 * Interpreter::stackElementSize));
      __ ldrs(v1, Address(esp, Interpreter::stackElementSize));
      __ ldrs(v2, Address(esp));
      __ fmadds(v0, v0, v1, v2);
      __ mov(sp, r13); // Restore caller's SP
    }
    break;
  default:
    ;
  }
  if (entry_point) {
    __ br(continuation);
  }

  return entry_point;
}

// double trigonometrics and transcendentals
// static jdouble dsin(jdouble x);
// static jdouble dcos(jdouble x);
// static jdouble dtan(jdouble x);
// static jdouble dlog(jdouble x);
// static jdouble dlog10(jdouble x);
// static jdouble dexp(jdouble x);
// static jdouble dpow(jdouble x, jdouble y);

void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
  address fn;
  switch (kind) {
  case Interpreter::java_lang_math_sin :
    if (StubRoutines::dsin() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
    }
    break;
  case Interpreter::java_lang_math_cos :
    if (StubRoutines::dcos() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
    }
    break;
  case Interpreter::java_lang_math_tan :
    if (StubRoutines::dtan() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
    }
    break;
  case Interpreter::java_lang_math_log :
    if (StubRoutines::dlog() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
    }
    break;
  case Interpreter::java_lang_math_log10 :
    if (StubRoutines::dlog10() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
    }
    break;
  case Interpreter::java_lang_math_exp :
    if (StubRoutines::dexp() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
    }
    break;
  case Interpreter::java_lang_math_pow :
    fpargs = 2;
    if (StubRoutines::dpow() == NULL) {
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    } else {
      fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
    }
    break;
  default:
    ShouldNotReachHere();
    fn = NULL;  // unreachable
  }
  const int gpargs = 0, rtype = 3;
  __ mov(rscratch1, fn);
  __ blrt(rscratch1, gpargs, fpargs, rtype);
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  // rmethod: Method*
  // r13: sender SP

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                                     rmethod);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                              frame::interpreter_frame_monitor_block_top_offset *
                              wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
                                  // grows negative)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters

  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  // ??? convention: expect array in register r3
  __ mov(c_rarg1, r3);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  if (state == atos) {
    Register obj = r0;
    Register mdp = r1;
    Register tmp = r2;
    __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
    __ profile_return_type(mdp, obj, tmp);
  }

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

#ifndef PRODUCT
  // tell the simulator that the method has been reentered
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }
#endif

  __ check_and_handle_popframe(rthread);
  __ check_and_handle_earlyret(rthread);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step,
                                                               address continuation) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ ldr(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
    __ cbz(rscratch1, L);
    // Clear flag.
    __ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset()));
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ ldr(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
      __ cbz(rscratch1, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ uxtb(r0, r0);       break;
  case T_CHAR   : __ uxth(r0, r0);       break;
  case T_BYTE   : __ sxtb(r0, r0);       break;
  case T_SHORT  : __ sxth(r0, r0);       break;
  case T_INT    : __ uxtw(r0, r0);       break; // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */    break;
  case T_VOID   : /* nothing to do */    break;
  case T_FLOAT  : /* nothing to do */    break;
  case T_DOUBLE : /* nothing to do */    break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);                            // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rscratch2, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed (this wasn't obvious in
// generate_method_entry), so the guard should work for them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
  __ ldr(rscratch1, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ cbnz(rscratch1, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
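  // After the add below, r0 holds stack_overflow_limit plus the frame size
  // in bytes: the lowest address this activation may reach. The
  // unsigned-higher branch succeeds only if the current SP is strictly
  // above that point.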
  __ add(r0, r0, rscratch1);

  // Check against the current stack bottom.
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ load_mirror(r0, rmethod);

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size);      // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 14 * wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -14 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 12 * wordSize));
  } else {
    __ sub(esp, sp, 12 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -12 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize)); // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 6 * wordSize));        // save Method* (no mdp)
  }

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rscratch1, rmethod);
  __ stp(rscratch1, zr, Address(sp, 4 * wordSize));

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 10 * wordSize));
  __ lea(rfp, Address(sp, 10 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 8 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must preserve for slow path, set SP to it on fast path

  // LR is live.  It must be saved around calls.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(local_0, Address(esp, 0));
  __ cbz(local_0, slow_path);

  __ mov(r19, r13);   // Move senderSP to a callee-saved register

  // Load the value of the referent field.
  const Address field_address(local_0, referent_offset);
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->load_at(_masm, IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ rscratch2, /*tmp2*/ rscratch1);

  // areturn
  __ andr(sp, r19, -16);  // done with stack
  __ ret(lr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ safepoint_poll(slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.
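    // (That is also why the safepoint poll above routes to the full native
    // entry: this frameless fast path can never be stopped at a safepoint.)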

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register val = c_rarg1;  // source java byte value
    const Register tbl = c_rarg2;  // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));        // byte value
    __ ldrw(crc, Address(esp, wordSize)); // Initial CRC

    unsigned long offset;
    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

    __ mvnw(crc, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ mvnw(crc, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ safepoint_poll(slow_path);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize));  // long buf
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize));  // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Prepare jump to stub using parameters from the stack
    const Register crc = c_rarg0; // initial crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // len argument to the kernel

    const Register end = len; // index of last element to process
    const Register off = crc; // offset

    __ ldrw(end, Address(esp));           // int end
    __ ldrw(off, Address(esp, wordSize)); // int offset
    __ sub(len, end, off);
    __ ldr(buf, Address(esp, 2*wordSize)); // byte[] buf | long buf
    __ add(buf, buf, off); // + offset
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ ldrw(crc, Address(esp, 4*wordSize)); // long crc
    } else {
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(crc, Address(esp, 3*wordSize)); // long crc
    }

    __ andr(sp, r13, -16); // Restore the caller's SP

    // Jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()));

    return entry;
  }
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
    const int start_page = native_call ? n_shadow_pages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
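// The fixed part of the frame is still laid out by generate_fixed_frame(true),
// which reserves two extra zero-initialized slots for native calls.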
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
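  // lock_method() allocates the single monitor slot in the frame and locks
  // the receiver, or the class mirror for static methods.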
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
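  // Invoke the signature handler: it copies the Java arguments starting at
  // rlocals into the C ABI locations (argument registers and the native SP
  // area), as the from()/to()/temp() asserts above document.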
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
    // get mirror
    __ load_mirror(t, rmethod);
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blrt(r10, rscratch1);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dmb(Assembler::ISH);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;
    __ safepoint_poll_acquire(L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blrt(rscratch2, 1, 0, 0);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(r0, rthread, t);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrw(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blrt(rscratch2, 0, 0, 0);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));    // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    //       used in call_VM_base(); i.e., we should use the
    //       StubRoutines::forward_exception code. For now this doesn't work
    //       here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor is expected in c_rarg1 for the slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0 and call the result handler to
  // convert and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
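  // (If not, the check unwinds back to the caller's SP and jumps to the
  // shared runtime StackOverflowError stub; see generate_stack_overflow_check
  // above.)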
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif
  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  Label no_mdp;
  Register mdp = r3;
  __ ldr(mdp, Address(rmethod, Method::method_data_offset()));
  __ cbz(mdp, no_mdp);
  __ add(mdp, mdp, in_bytes(MethodData::data_offset()));
  __ profile_parameters_type(mdp, r1, r2);
  __ bind(no_mdp);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

#ifndef PRODUCT
  // tell the simulator that the caller method has been reentered
  if (NotifySimulator) {
    __ get_method(rmethod);
    __ notify(Assembler::method_reentry);
  }
#endif
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
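  // (get_method() reloads the Method* from the interpreter frame's
  //  saved method slot, roughly
  //
  //    rmethod = *(Method**)(rfp + frame::interpreter_frame_method_offset * wordSize);
  //
  //  so whatever stale value the register held is overwritten.)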
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                                    size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
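    // r0 is now the parameter area size in bytes. The two instructions
    // below derive the lowest-addressed parameter word from rlocals
    // (locals grow towards lower addresses), roughly:
    //
    //   buf = rlocals - r0 + wordSize;   // sketch, not emitted code
    //
    // giving popframe_preserve_args(rthread, r0, buf) the region of
    // arguments to copy aside before the frame is popped.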
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
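    // In outline the code below does (a sketch; the actual call goes
    // through call_VM and returns the result in r0):
    //
    //   oop arg = member_name_arg_or_null(thread, local0, rmethod, rbcp);
    //   if (arg != NULL)
    //     *esp = arg;   // re-push the member name argument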

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // rsp: expression stack of caller
  // rfp: fp of caller
  // FIXME: There's no point saving LR here because VM calls don't trash it
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);                                  // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);                                       // jump to exception
                                                   // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() &&
         t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  Register rscratch3 = r0;
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, rscratch3);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
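  // (Conceptually one trace stub exists per TosState, generated by
  //  generate_trace_code() above, so the call below amounts to
  //
  //    trace_code[t->tos_in()]();   // sketch; stub saves/restores state
  //
  //  rather than a full VM call per bytecode.)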

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#ifdef BUILTIN_SIM

#include <sys/mman.h>
#include <unistd.h>

extern "C" {
  static int PAGESIZE = getpagesize();
  int is_mapped_address(u_int64_t address)
  {
    address = (address & ~((u_int64_t)PAGESIZE - 1));
    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
      return true;
    }
    if (errno != ENOMEM) {
      return true;
    }
    return false;
  }

  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    if (method != 0) {
      method[0] = '\0';
    }
    if (bcidx != 0) {
      *bcidx = -2;
    }
    if (decode != 0) {
      decode[0] = 0;
    }

    if (framesize != 0) {
      *framesize = -1;
    }

    if (Interpreter::contains((address)pc)) {
      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
      Method* meth;
      address bcp;
      if (fp) {
#define FRAME_SLOT_METHOD 3
#define FRAME_SLOT_BCP 7
        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
#undef FRAME_SLOT_METHOD
#undef FRAME_SLOT_BCP
      } else {
        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
      }
      if (meth->is_native()) {
        return;
      }
      if (method && meth->is_method()) {
        ResourceMark rm;
        method[0] = 'I';
        method[1] = ' ';
        meth->name_and_sig_as_C_string(method + 2, 398);
      }
      if (bcidx) {
        if (meth->contains(bcp)) {
          *bcidx = meth->bci_from(bcp);
        } else {
          *bcidx = -2;
        }
      }
      if (decode) {
        if (!BytecodeTracer::closure()) {
          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
        }
        stringStream str(decode, 400);
        BytecodeTracer::trace(meth, bcp, &str);
      }
    } else {
      if (method) {
        CodeBlob *cb = CodeCache::find_blob((address)pc);
        if (cb != NULL) {
          if (cb->is_nmethod()) {
            ResourceMark rm;
            nmethod* nm = (nmethod*)cb;
            method[0] = 'C';
            method[1] = ' ';
            nm->method()->name_and_sig_as_C_string(method + 2, 398);
          } else if (cb->is_adapter_blob()) {
            strcpy(method, "B adapter blob");
          } else if (cb->is_runtime_stub()) {
            strcpy(method, "B runtime stub");
          } else if (cb->is_exception_stub()) {
            strcpy(method, "B exception stub");
          } else if (cb->is_deoptimization_stub()) {
            strcpy(method, "B deoptimization stub");
          } else if (cb->is_safepoint_stub()) {
            strcpy(method, "B safepoint stub");
          } else if (cb->is_uncommon_trap_stub()) {
            strcpy(method, "B uncommon trap stub");
          } else if (cb->contains((address)StubRoutines::call_stub())) {
            strcpy(method, "B call stub");
          } else {
            strcpy(method, "B unknown blob : ");
            strcat(method, cb->name());
          }
          if (framesize != NULL) {
            *framesize = cb->frame_size();
          }
        }
      }
    }
  }


  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    bccheck1(pc, fp, method, bcidx, framesize, decode);
  }
}

#endif // BUILTIN_SIM
#endif // !PRODUCT