1 /* 2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.hpp" 28 #include "gc/shared/barrierSetCodeGen.hpp" 29 #include "interpreter/bytecodeHistogram.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "interpreter/interpreterRuntime.hpp" 32 #include "interpreter/interp_masm.hpp" 33 #include "interpreter/templateInterpreterGenerator.hpp" 34 #include "interpreter/templateTable.hpp" 35 #include "interpreter/bytecodeTracer.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/arrayOop.hpp" 38 #include "oops/methodData.hpp" 39 #include "oops/method.hpp" 40 #include "oops/oop.inline.hpp" 41 #include "prims/jvmtiExport.hpp" 42 #include "prims/jvmtiThreadState.hpp" 43 #include "runtime/arguments.hpp" 44 #include "runtime/deoptimization.hpp" 45 #include "runtime/frame.inline.hpp" 46 #include "runtime/sharedRuntime.hpp" 47 #include "runtime/stubRoutines.hpp" 48 #include "runtime/synchronizer.hpp" 49 #include "runtime/timer.hpp" 50 #include "runtime/vframeArray.hpp" 51 #include "utilities/debug.hpp" 52 #include <sys/types.h> 53 54 #ifndef PRODUCT 55 #include "oops/method.hpp" 56 #endif // !PRODUCT 57 58 #ifdef BUILTIN_SIM 59 #include "../../../../../../simulator/simulator.hpp" 60 #endif 61 62 // Size of interpreter code. Increase if too small. Interpreter will 63 // fail with a guarantee ("not enough space for interpreter generation"); 64 // if too small. 65 // Run with +PrintInterpreter to get the VM to print out the size. 
66 // Max size with JVMTI 67 int TemplateInterpreter::InterpreterCodeSize = 200 * 1024; 68 69 #define __ _masm-> 70 71 //----------------------------------------------------------------------------- 72 73 extern "C" void entry(CodeBuffer*); 74 75 //----------------------------------------------------------------------------- 76 77 address TemplateInterpreterGenerator::generate_slow_signature_handler() { 78 address entry = __ pc(); 79 80 __ andr(esp, esp, -16); 81 __ mov(c_rarg3, esp); 82 // rmethod 83 // rlocals 84 // c_rarg3: first stack arg - wordSize 85 86 // adjust sp 87 __ sub(sp, c_rarg3, 18 * wordSize); 88 __ str(lr, Address(__ pre(sp, -2 * wordSize))); 89 __ call_VM(noreg, 90 CAST_FROM_FN_PTR(address, 91 InterpreterRuntime::slow_signature_handler), 92 rmethod, rlocals, c_rarg3); 93 94 // r0: result handler 95 96 // Stack layout: 97 // rsp: return address <- sp 98 // 1 garbage 99 // 8 integer args (if static first is unused) 100 // 1 float/double identifiers 101 // 8 double args 102 // stack args <- esp 103 // garbage 104 // expression stack bottom 105 // bcp (NULL) 106 // ... 107 108 // Restore LR 109 __ ldr(lr, Address(__ post(sp, 2 * wordSize))); 110 111 // Do FP first so we can use c_rarg3 as temp 112 __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers 113 114 for (int i = 0; i < Argument::n_float_register_parameters_c; i++) { 115 const FloatRegister r = as_FloatRegister(i); 116 117 Label d, done; 118 119 __ tbnz(c_rarg3, i, d); 120 __ ldrs(r, Address(sp, (10 + i) * wordSize)); 121 __ b(done); 122 __ bind(d); 123 __ ldrd(r, Address(sp, (10 + i) * wordSize)); 124 __ bind(done); 125 } 126 127 // c_rarg0 contains the result from the call of 128 // InterpreterRuntime::slow_signature_handler so we don't touch it 129 // here. It will be loaded with the JNIEnv* later. 
130 __ ldr(c_rarg1, Address(sp, 1 * wordSize)); 131 for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) { 132 Register rm = as_Register(i), rn = as_Register(i+1); 133 __ ldp(rm, rn, Address(sp, i * wordSize)); 134 } 135 136 __ add(sp, sp, 18 * wordSize); 137 __ ret(lr); 138 139 return entry; 140 } 141 142 143 // 144 // Various method entries 145 // 146 147 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { 148 // rmethod: Method* 149 // r13: sender sp 150 // esp: args 151 152 if (!InlineIntrinsics) return NULL; // Generate a vanilla entry 153 154 // These don't need a safepoint check because they aren't virtually 155 // callable. We won't enter these intrinsics from compiled code. 156 // If in the future we added an intrinsic which was virtually callable 157 // we'd have to worry about how to safepoint so that this code is used. 158 159 // mathematical functions inlined by compiler 160 // (interpreter must provide identical implementation 161 // in order to avoid monotonicity bugs when switching 162 // from interpreter to compiler in the middle of some 163 // computation) 164 // 165 // stack: 166 // [ arg ] <-- esp 167 // [ arg ] 168 // retaddr in lr 169 170 address entry_point = NULL; 171 Register continuation = lr; 172 switch (kind) { 173 case Interpreter::java_lang_math_abs: 174 entry_point = __ pc(); 175 __ ldrd(v0, Address(esp)); 176 __ fabsd(v0, v0); 177 __ mov(sp, r13); // Restore caller's SP 178 break; 179 case Interpreter::java_lang_math_sqrt: 180 entry_point = __ pc(); 181 __ ldrd(v0, Address(esp)); 182 __ fsqrtd(v0, v0); 183 __ mov(sp, r13); 184 break; 185 case Interpreter::java_lang_math_sin : 186 case Interpreter::java_lang_math_cos : 187 case Interpreter::java_lang_math_tan : 188 case Interpreter::java_lang_math_log : 189 case Interpreter::java_lang_math_log10 : 190 case Interpreter::java_lang_math_exp : 191 entry_point = __ pc(); 192 __ ldrd(v0, Address(esp)); 193 __ mov(sp, r13); 194 __ 
mov(r19, lr); 195 continuation = r19; // The first callee-saved register 196 generate_transcendental_entry(kind, 1); 197 break; 198 case Interpreter::java_lang_math_pow : 199 entry_point = __ pc(); 200 __ mov(r19, lr); 201 continuation = r19; 202 __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize)); 203 __ ldrd(v1, Address(esp)); 204 __ mov(sp, r13); 205 generate_transcendental_entry(kind, 2); 206 break; 207 case Interpreter::java_lang_math_fmaD : 208 if (UseFMA) { 209 entry_point = __ pc(); 210 __ ldrd(v0, Address(esp, 4 * Interpreter::stackElementSize)); 211 __ ldrd(v1, Address(esp, 2 * Interpreter::stackElementSize)); 212 __ ldrd(v2, Address(esp)); 213 __ fmaddd(v0, v0, v1, v2); 214 __ mov(sp, r13); // Restore caller's SP 215 } 216 break; 217 case Interpreter::java_lang_math_fmaF : 218 if (UseFMA) { 219 entry_point = __ pc(); 220 __ ldrs(v0, Address(esp, 2 * Interpreter::stackElementSize)); 221 __ ldrs(v1, Address(esp, Interpreter::stackElementSize)); 222 __ ldrs(v2, Address(esp)); 223 __ fmadds(v0, v0, v1, v2); 224 __ mov(sp, r13); // Restore caller's SP 225 } 226 break; 227 default: 228 ; 229 } 230 if (entry_point) { 231 __ br(continuation); 232 } 233 234 return entry_point; 235 } 236 237 // double trigonometrics and transcendentals 238 // static jdouble dsin(jdouble x); 239 // static jdouble dcos(jdouble x); 240 // static jdouble dtan(jdouble x); 241 // static jdouble dlog(jdouble x); 242 // static jdouble dlog10(jdouble x); 243 // static jdouble dexp(jdouble x); 244 // static jdouble dpow(jdouble x, jdouble y); 245 246 void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) { 247 address fn; 248 switch (kind) { 249 case Interpreter::java_lang_math_sin : 250 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); 251 break; 252 case Interpreter::java_lang_math_cos : 253 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); 254 break; 255 case Interpreter::java_lang_math_tan : 256 fn = 
CAST_FROM_FN_PTR(address, SharedRuntime::dtan); 257 break; 258 case Interpreter::java_lang_math_log : 259 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog); 260 break; 261 case Interpreter::java_lang_math_log10 : 262 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); 263 break; 264 case Interpreter::java_lang_math_exp : 265 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp); 266 break; 267 case Interpreter::java_lang_math_pow : 268 fpargs = 2; 269 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); 270 break; 271 default: 272 ShouldNotReachHere(); 273 fn = NULL; // unreachable 274 } 275 const int gpargs = 0, rtype = 3; 276 __ mov(rscratch1, fn); 277 __ blrt(rscratch1, gpargs, fpargs, rtype); 278 } 279 280 // Abstract method entry 281 // Attempt to execute abstract method. Throw exception 282 address TemplateInterpreterGenerator::generate_abstract_entry(void) { 283 // rmethod: Method* 284 // r13: sender SP 285 286 address entry_point = __ pc(); 287 288 // abstract method entry 289 290 // pop return address, reset last_sp to NULL 291 __ empty_expression_stack(); 292 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed) 293 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) 294 295 // throw exception 296 __ call_VM(noreg, CAST_FROM_FN_PTR(address, 297 InterpreterRuntime::throw_AbstractMethodError)); 298 // the call_VM checks for exception, so we should never return here. 
299 __ should_not_reach_here(); 300 301 return entry_point; 302 } 303 304 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { 305 address entry = __ pc(); 306 307 #ifdef ASSERT 308 { 309 Label L; 310 __ ldr(rscratch1, Address(rfp, 311 frame::interpreter_frame_monitor_block_top_offset * 312 wordSize)); 313 __ mov(rscratch2, sp); 314 __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack 315 // grows negative) 316 __ br(Assembler::HS, L); // check if frame is complete 317 __ stop ("interpreter frame not set up"); 318 __ bind(L); 319 } 320 #endif // ASSERT 321 // Restore bcp under the assumption that the current frame is still 322 // interpreted 323 __ restore_bcp(); 324 325 // expression stack must be empty before entering the VM if an 326 // exception happened 327 __ empty_expression_stack(); 328 // throw exception 329 __ call_VM(noreg, 330 CAST_FROM_FN_PTR(address, 331 InterpreterRuntime::throw_StackOverflowError)); 332 return entry; 333 } 334 335 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( 336 const char* name) { 337 address entry = __ pc(); 338 // expression stack must be empty before entering the VM if an 339 // exception happened 340 __ empty_expression_stack(); 341 // setup parameters 342 // ??? 
convention: expect aberrant index in register r1 343 __ movw(c_rarg2, r1); 344 __ mov(c_rarg1, (address)name); 345 __ call_VM(noreg, 346 CAST_FROM_FN_PTR(address, 347 InterpreterRuntime:: 348 throw_ArrayIndexOutOfBoundsException), 349 c_rarg1, c_rarg2); 350 return entry; 351 } 352 353 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 354 address entry = __ pc(); 355 356 // object is at TOS 357 __ pop(c_rarg1); 358 359 // expression stack must be empty before entering the VM if an 360 // exception happened 361 __ empty_expression_stack(); 362 363 __ call_VM(noreg, 364 CAST_FROM_FN_PTR(address, 365 InterpreterRuntime:: 366 throw_ClassCastException), 367 c_rarg1); 368 return entry; 369 } 370 371 address TemplateInterpreterGenerator::generate_exception_handler_common( 372 const char* name, const char* message, bool pass_oop) { 373 assert(!pass_oop || message == NULL, "either oop or message but not both"); 374 address entry = __ pc(); 375 if (pass_oop) { 376 // object is at TOS 377 __ pop(c_rarg2); 378 } 379 // expression stack must be empty before entering the VM if an 380 // exception happened 381 __ empty_expression_stack(); 382 // setup parameters 383 __ lea(c_rarg1, Address((address)name)); 384 if (pass_oop) { 385 __ call_VM(r0, CAST_FROM_FN_PTR(address, 386 InterpreterRuntime:: 387 create_klass_exception), 388 c_rarg1, c_rarg2); 389 } else { 390 // kind of lame ExternalAddress can't take NULL because 391 // external_word_Relocation will assert. 
392 if (message != NULL) { 393 __ lea(c_rarg2, Address((address)message)); 394 } else { 395 __ mov(c_rarg2, NULL_WORD); 396 } 397 __ call_VM(r0, 398 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), 399 c_rarg1, c_rarg2); 400 } 401 // throw exception 402 __ b(address(Interpreter::throw_exception_entry())); 403 return entry; 404 } 405 406 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { 407 address entry = __ pc(); 408 409 // Restore stack bottom in case i2c adjusted stack 410 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 411 // and NULL it as marker that esp is now tos until next java call 412 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 413 __ restore_bcp(); 414 __ restore_locals(); 415 __ restore_constant_pool_cache(); 416 __ get_method(rmethod); 417 418 // Pop N words from the stack 419 __ get_cache_and_index_at_bcp(r1, r2, 1, index_size); 420 __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 421 __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask); 422 423 __ add(esp, esp, r1, Assembler::LSL, 3); 424 425 // Restore machine SP 426 __ ldr(rscratch1, Address(rmethod, Method::const_offset())); 427 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset())); 428 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2); 429 __ ldr(rscratch2, 430 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize)); 431 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3); 432 __ andr(sp, rscratch1, -16); 433 434 #ifndef PRODUCT 435 // tell the simulator that the method has been reentered 436 if (NotifySimulator) { 437 __ notify(Assembler::method_reentry); 438 } 439 #endif 440 441 __ check_and_handle_popframe(rthread); 442 __ check_and_handle_earlyret(rthread); 443 444 __ get_dispatch(); 445 __ dispatch_next(state, step); 446 447 return 
entry; 448 } 449 450 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, 451 int step) { 452 address entry = __ pc(); 453 __ restore_bcp(); 454 __ restore_locals(); 455 __ restore_constant_pool_cache(); 456 __ get_method(rmethod); 457 __ get_dispatch(); 458 459 // Calculate stack limit 460 __ ldr(rscratch1, Address(rmethod, Method::const_offset())); 461 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset())); 462 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2); 463 __ ldr(rscratch2, 464 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize)); 465 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3); 466 __ andr(sp, rscratch1, -16); 467 468 // Restore expression stack pointer 469 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 470 // NULL last_sp until next java call 471 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 472 473 #if INCLUDE_JVMCI 474 // Check if we need to take lock at entry of synchronized method. This can 475 // only occur on method entry so emit it only for vtos with step 0. 476 if (UseJVMCICompiler && state == vtos && step == 0) { 477 Label L; 478 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 479 __ cbz(rscratch1, L); 480 // Clear flag. 481 __ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset())); 482 // Take lock. 
483 lock_method(); 484 __ bind(L); 485 } else { 486 #ifdef ASSERT 487 if (UseJVMCICompiler) { 488 Label L; 489 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 490 __ cbz(rscratch1, L); 491 __ stop("unexpected pending monitor in deopt entry"); 492 __ bind(L); 493 } 494 #endif 495 } 496 #endif 497 // handle exceptions 498 { 499 Label L; 500 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); 501 __ cbz(rscratch1, L); 502 __ call_VM(noreg, 503 CAST_FROM_FN_PTR(address, 504 InterpreterRuntime::throw_pending_exception)); 505 __ should_not_reach_here(); 506 __ bind(L); 507 } 508 509 __ dispatch_next(state, step); 510 return entry; 511 } 512 513 address TemplateInterpreterGenerator::generate_result_handler_for( 514 BasicType type) { 515 address entry = __ pc(); 516 switch (type) { 517 case T_BOOLEAN: __ uxtb(r0, r0); break; 518 case T_CHAR : __ uxth(r0, r0); break; 519 case T_BYTE : __ sxtb(r0, r0); break; 520 case T_SHORT : __ sxth(r0, r0); break; 521 case T_INT : __ uxtw(r0, r0); break; // FIXME: We almost certainly don't need this 522 case T_LONG : /* nothing to do */ break; 523 case T_VOID : /* nothing to do */ break; 524 case T_FLOAT : /* nothing to do */ break; 525 case T_DOUBLE : /* nothing to do */ break; 526 case T_OBJECT : 527 // retrieve result from frame 528 __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize)); 529 // and verify it 530 __ verify_oop(r0); 531 break; 532 default : ShouldNotReachHere(); 533 } 534 __ ret(lr); // return from result handler 535 return entry; 536 } 537 538 address TemplateInterpreterGenerator::generate_safept_entry_for( 539 TosState state, 540 address runtime_entry) { 541 address entry = __ pc(); 542 __ push(state); 543 __ call_VM(noreg, runtime_entry); 544 __ membar(Assembler::AnyAny); 545 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); 546 return entry; 547 } 548 549 // Helpers for commoning out cases in the various type of method entries. 
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rscratch2, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(do_continue);
}

// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
  __ ldr(rscratch1, stack_limit);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack limit is non-zero.
  __ cbnz(rscratch1, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add stack limit to locals.
  __ add(r0, r0, rscratch1);

  // Check against the current stack bottom.
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    // static method: synchronize on the class mirror instead
    __ load_mirror(r0, rmethod);

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size); // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 14 * wordSize);
    // native frames have no bcp
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -14 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 12 * wordSize));
  } else {
    __ sub(esp, sp, 12 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -12 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    // rscratch1 stays NULL if the method has no MDO yet
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize));  // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 6 * wordSize));         // save Method* (no mdp)
  }

  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rscratch1, rmethod);
  __ stp(rscratch1, zr, Address(sp, 4 * wordSize));

  // resolve Method* -> ConstMethod* -> ConstantPool* -> cache
  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 10 * wordSize));
  __ lea(rfp, Address(sp, 10 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 8 * wordSize));

  // Move SP out of the way: reserve max_stack slots plus monitor/slop
  // space below the fixed frame, keeping 16-byte alignment.
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  const Register local_0 = c_rarg0;
  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(local_0, Address(esp, 0));
  __ cbz(local_0, slow_path);

  __ mov(r19, r13);   // Move senderSP to a callee-saved register

  // Load the value of the referent field; the GC-specific barrier code
  // (e.g. the G1 SATB pre-barrier) is emitted by the barrier set.
  const Address field_address(local_0, referent_offset);
  BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
  code_gen->load_at(_masm, ACCESS_IN_HEAP | ACCESS_ON_WEAK_OOP_REF, T_OBJECT, local_0, field_address, /*tmp1*/ rscratch2, /*tmp2*/ rscratch1);

  // areturn
  __ andr(sp, r19, -16);  // done with stack
  __ ret(lr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;

}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
951 ExternalAddress state(SafepointSynchronize::address_of_state()); 952 unsigned long offset; 953 __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset); 954 __ ldrw(rscratch1, Address(rscratch1, offset)); 955 assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code"); 956 __ cbnz(rscratch1, slow_path); 957 958 // We don't generate local frame and don't align stack because 959 // we call stub code and there is no safepoint on this path. 960 961 // Load parameters 962 const Register crc = c_rarg0; // crc 963 const Register val = c_rarg1; // source java byte value 964 const Register tbl = c_rarg2; // scratch 965 966 // Arguments are reversed on java expression stack 967 __ ldrw(val, Address(esp, 0)); // byte value 968 __ ldrw(crc, Address(esp, wordSize)); // Initial CRC 969 970 __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset); 971 __ add(tbl, tbl, offset); 972 973 __ ornw(crc, zr, crc); // ~crc 974 __ update_byte_crc32(crc, val, tbl); 975 __ ornw(crc, zr, crc); // ~crc 976 977 // result in c_rarg0 978 979 __ andr(sp, r13, -16); 980 __ ret(lr); 981 982 // generate a vanilla native entry as the slow path 983 __ bind(slow_path); 984 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native)); 985 return entry; 986 } 987 return NULL; 988 } 989 990 /** 991 * Method entry for static native methods: 992 * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) 993 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) 994 */ 995 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { 996 if (UseCRC32Intrinsics) { 997 address entry = __ pc(); 998 999 // rmethod,: Method* 1000 // r13: senderSP must preserved for slow path 1001 1002 Label slow_path; 1003 // If we need a safepoint check, generate full interpreter entry. 
1004 ExternalAddress state(SafepointSynchronize::address_of_state()); 1005 unsigned long offset; 1006 __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset); 1007 __ ldrw(rscratch1, Address(rscratch1, offset)); 1008 assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code"); 1009 __ cbnz(rscratch1, slow_path); 1010 1011 // We don't generate local frame and don't align stack because 1012 // we call stub code and there is no safepoint on this path. 1013 1014 // Load parameters 1015 const Register crc = c_rarg0; // crc 1016 const Register buf = c_rarg1; // source java byte array address 1017 const Register len = c_rarg2; // length 1018 const Register off = len; // offset (never overlaps with 'len') 1019 1020 // Arguments are reversed on java expression stack 1021 // Calculate address of start element 1022 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { 1023 __ ldr(buf, Address(esp, 2*wordSize)); // long buf 1024 __ ldrw(off, Address(esp, wordSize)); // offset 1025 __ add(buf, buf, off); // + offset 1026 __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC 1027 } else { 1028 __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array 1029 __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size 1030 __ ldrw(off, Address(esp, wordSize)); // offset 1031 __ add(buf, buf, off); // + offset 1032 __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC 1033 } 1034 // Can now load 'len' since we're finished with 'off' 1035 __ ldrw(len, Address(esp, 0x0)); // Length 1036 1037 __ andr(sp, r13, -16); // Restore the caller's SP 1038 1039 // We are frameless so we can just jump to the stub. 
1040 __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32())); 1041 1042 // generate a vanilla native entry as the slow path 1043 __ bind(slow_path); 1044 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native)); 1045 return entry; 1046 } 1047 return NULL; 1048 } 1049 1050 // Not supported 1051 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { 1052 return NULL; 1053 } 1054 1055 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { 1056 // Bang each page in the shadow zone. We can't assume it's been done for 1057 // an interpreter frame with greater than a page of locals, so each page 1058 // needs to be checked. Only true for non-native. 1059 if (UseStackBanging) { 1060 const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size(); 1061 const int start_page = native_call ? n_shadow_pages : 1; 1062 const int page_size = os::vm_page_size(); 1063 for (int pages = start_page; pages <= n_shadow_pages ; pages++) { 1064 __ sub(rscratch2, sp, pages*page_size); 1065 __ str(zr, Address(rscratch2)); 1066 } 1067 } 1068 } 1069 1070 1071 // Interpreter stub for calling a native method. (asm interpreter) 1072 // This sets up a somewhat different looking stack for calling the 1073 // native method than the typical interpreter frame setup. 
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // Native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack.

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    // No profile_method labels for natives: they are never profiled/OSRed.
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    // No cached handler yet: ask the runtime to create one, then reload it.
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
    // get mirror
    __ load_mirror(t, rmethod);
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    // Still the "unsatisfied link" stub: try to link the native method now.
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blrt(r10, rscratch1);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blrt(rscratch2, 1, 0, 0);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    // NOTE(review): not_weak and store_result appear unused here — presumably
    // left over from an earlier version of the unboxing sequence; confirm.
    Label no_oop, not_weak, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(r0, rthread, t);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrw(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blrt(rscratch2, 0, 0, 0);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expect in c_rarg1 for slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif
  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

#ifndef PRODUCT
  // tell the simulator that the caller method has been reentered
  if (NotifySimulator) {
    __ get_method(rmethod);
    __ notify(Assembler::method_reentry);
  }
#endif
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
1706 __ get_method(rmethod); 1707 // expression stack is undefined here 1708 // r0: exception 1709 // rbcp: exception bcp 1710 __ verify_oop(r0); 1711 __ mov(c_rarg1, r0); 1712 1713 // expression stack must be empty before entering the VM in case of 1714 // an exception 1715 __ empty_expression_stack(); 1716 // find exception handler address and preserve exception oop 1717 __ call_VM(r3, 1718 CAST_FROM_FN_PTR(address, 1719 InterpreterRuntime::exception_handler_for_exception), 1720 c_rarg1); 1721 1722 // Calculate stack limit 1723 __ ldr(rscratch1, Address(rmethod, Method::const_offset())); 1724 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset())); 1725 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4); 1726 __ ldr(rscratch2, 1727 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize)); 1728 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3); 1729 __ andr(sp, rscratch1, -16); 1730 1731 // r0: exception handler entry point 1732 // r3: preserved exception oop 1733 // rbcp: bcp for exception handler 1734 __ push_ptr(r3); // push exception which is now the only value on the stack 1735 __ br(r0); // jump to exception handler (may be _remove_activation_entry!) 1736 1737 // If the exception is not handled in the current frame the frame is 1738 // removed and the exception is rethrown (i.e. exception 1739 // continuation is _rethrow_exception). 1740 // 1741 // Note: At this point the bci is still the bxi for the instruction 1742 // which caused the exception and the expression stack is 1743 // empty. Thus, for any VM calls at this point, GC will find a legal 1744 // oop map (with empty expression stack). 
1745 1746 // 1747 // JVMTI PopFrame support 1748 // 1749 1750 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1751 __ empty_expression_stack(); 1752 // Set the popframe_processing bit in pending_popframe_condition 1753 // indicating that we are currently handling popframe, so that 1754 // call_VMs that may happen later do not trigger new popframe 1755 // handling cycles. 1756 __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset())); 1757 __ orr(r3, r3, JavaThread::popframe_processing_bit); 1758 __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset())); 1759 1760 { 1761 // Check to see whether we are returning to a deoptimized frame. 1762 // (The PopFrame call ensures that the caller of the popped frame is 1763 // either interpreted or compiled and deoptimizes it if compiled.) 1764 // In this case, we can't call dispatch_next() after the frame is 1765 // popped, but instead must save the incoming arguments and restore 1766 // them after deoptimization has occurred. 1767 // 1768 // Note that we don't compare the return PC against the 1769 // deoptimization blob's unpack entry because of the presence of 1770 // adapter frames in C2. 1771 Label caller_not_deoptimized; 1772 __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize)); 1773 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1774 InterpreterRuntime::interpreter_contains), c_rarg1); 1775 __ cbnz(r0, caller_not_deoptimized); 1776 1777 // Compute size of arguments for saving when returning to 1778 // deoptimized caller 1779 __ get_method(r0); 1780 __ ldr(r0, Address(r0, Method::const_offset())); 1781 __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod:: 1782 size_of_parameters_offset()))); 1783 __ lsl(r0, r0, Interpreter::logStackElementSize); 1784 __ restore_locals(); // XXX do we need this? 
1785 __ sub(rlocals, rlocals, r0); 1786 __ add(rlocals, rlocals, wordSize); 1787 // Save these arguments 1788 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1789 Deoptimization:: 1790 popframe_preserve_args), 1791 rthread, r0, rlocals); 1792 1793 __ remove_activation(vtos, 1794 /* throw_monitor_exception */ false, 1795 /* install_monitor_exception */ false, 1796 /* notify_jvmdi */ false); 1797 1798 // Inform deoptimization that it is responsible for restoring 1799 // these arguments 1800 __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit); 1801 __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset())); 1802 1803 // Continue in deoptimization handler 1804 __ ret(lr); 1805 1806 __ bind(caller_not_deoptimized); 1807 } 1808 1809 __ remove_activation(vtos, 1810 /* throw_monitor_exception */ false, 1811 /* install_monitor_exception */ false, 1812 /* notify_jvmdi */ false); 1813 1814 // Restore the last_sp and null it out 1815 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 1816 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); 1817 1818 __ restore_bcp(); 1819 __ restore_locals(); 1820 __ restore_constant_pool_cache(); 1821 __ get_method(rmethod); 1822 1823 // The method data pointer was incremented already during 1824 // call profiling. We have to restore the mdp for the current bcp. 1825 if (ProfileInterpreter) { 1826 __ set_method_data_pointer_for_bcp(); 1827 } 1828 1829 // Clear the popframe condition flag 1830 __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset())); 1831 assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive"); 1832 1833 #if INCLUDE_JVMTI 1834 { 1835 Label L_done; 1836 1837 __ ldrb(rscratch1, Address(rbcp, 0)); 1838 __ cmpw(r1, Bytecodes::_invokestatic); 1839 __ br(Assembler::EQ, L_done); 1840 1841 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 
1842 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. 1843 1844 __ ldr(c_rarg0, Address(rlocals, 0)); 1845 __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp); 1846 1847 __ cbz(r0, L_done); 1848 1849 __ str(r0, Address(esp, 0)); 1850 __ bind(L_done); 1851 } 1852 #endif // INCLUDE_JVMTI 1853 1854 // Restore machine SP 1855 __ ldr(rscratch1, Address(rmethod, Method::const_offset())); 1856 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset())); 1857 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4); 1858 __ ldr(rscratch2, 1859 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize)); 1860 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3); 1861 __ andr(sp, rscratch1, -16); 1862 1863 __ dispatch_next(vtos); 1864 // end of PopFrame support 1865 1866 Interpreter::_remove_activation_entry = __ pc(); 1867 1868 // preserve exception over this code sequence 1869 __ pop_ptr(r0); 1870 __ str(r0, Address(rthread, JavaThread::vm_result_offset())); 1871 // remove the activation (without doing throws on illegalMonitorExceptions) 1872 __ remove_activation(vtos, false, true, false); 1873 // restore exception 1874 // restore exception 1875 __ get_vm_result(r0, rthread); 1876 1877 // In between activations - previous activation type unknown yet 1878 // compute continuation point - the continuation point expects the 1879 // following registers set up: 1880 // 1881 // r0: exception 1882 // lr: return address/pc that threw exception 1883 // rsp: expression stack of caller 1884 // rfp: fp of caller 1885 // FIXME: There's no point saving LR here because VM calls don't trash it 1886 __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize))); // save exception & return address 1887 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1888 SharedRuntime::exception_handler_for_return_address), 1889 rthread, lr); 1890 __ mov(r1, r0); // save 
exception handler 1891 __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize))); // restore exception & return address 1892 // We might be returning to a deopt handler that expects r3 to 1893 // contain the exception pc 1894 __ mov(r3, lr); 1895 // Note that an "issuing PC" is actually the next PC after the call 1896 __ br(r1); // jump to exception 1897 // handler of caller 1898 } 1899 1900 1901 // 1902 // JVMTI ForceEarlyReturn support 1903 // 1904 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 1905 address entry = __ pc(); 1906 1907 __ restore_bcp(); 1908 __ restore_locals(); 1909 __ empty_expression_stack(); 1910 __ load_earlyret_value(state); 1911 1912 __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset())); 1913 Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset()); 1914 1915 // Clear the earlyret state 1916 assert(JvmtiThreadState::earlyret_inactive == 0, "should be"); 1917 __ str(zr, cond_addr); 1918 1919 __ remove_activation(state, 1920 false, /* throw_monitor_exception */ 1921 false, /* install_monitor_exception */ 1922 true); /* notify_jvmdi */ 1923 __ ret(lr); 1924 1925 return entry; 1926 } // end of ForceEarlyReturn support 1927 1928 1929 1930 //----------------------------------------------------------------------------- 1931 // Helper for vtos entry point generation 1932 1933 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, 1934 address& bep, 1935 address& cep, 1936 address& sep, 1937 address& aep, 1938 address& iep, 1939 address& lep, 1940 address& fep, 1941 address& dep, 1942 address& vep) { 1943 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); 1944 Label L; 1945 aep = __ pc(); __ push_ptr(); __ b(L); 1946 fep = __ pc(); __ push_f(); __ b(L); 1947 dep = __ pc(); __ push_d(); __ b(L); 1948 lep = __ pc(); __ push_l(); __ b(L); 1949 bep = cep = sep = 1950 iep = __ pc(); __ push_i(); 1951 vep = __ pc(); 1952 __ bind(L); 1953 
generate_and_dispatch(t); 1954 } 1955 1956 //----------------------------------------------------------------------------- 1957 1958 // Non-product code 1959 #ifndef PRODUCT 1960 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 1961 address entry = __ pc(); 1962 1963 __ push(lr); 1964 __ push(state); 1965 __ push(RegSet::range(r0, r15), sp); 1966 __ mov(c_rarg2, r0); // Pass itos 1967 __ call_VM(noreg, 1968 CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), 1969 c_rarg1, c_rarg2, c_rarg3); 1970 __ pop(RegSet::range(r0, r15), sp); 1971 __ pop(state); 1972 __ pop(lr); 1973 __ ret(lr); // return from result handler 1974 1975 return entry; 1976 } 1977 1978 void TemplateInterpreterGenerator::count_bytecode() { 1979 Register rscratch3 = r0; 1980 __ push(rscratch1); 1981 __ push(rscratch2); 1982 __ push(rscratch3); 1983 __ mov(rscratch3, (address) &BytecodeCounter::_counter_value); 1984 __ atomic_add(noreg, 1, rscratch3); 1985 __ pop(rscratch3); 1986 __ pop(rscratch2); 1987 __ pop(rscratch1); 1988 } 1989 1990 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; } 1991 1992 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; } 1993 1994 1995 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 1996 // Call a little run-time stub to avoid blow-up for each bytecode. 1997 // The run-time runtime saves the right registers, depending on 1998 // the tosca in-state for the given template. 
1999 2000 assert(Interpreter::trace_code(t->tos_in()) != NULL, 2001 "entry must have been generated"); 2002 __ bl(Interpreter::trace_code(t->tos_in())); 2003 __ reinit_heapbase(); 2004 } 2005 2006 2007 void TemplateInterpreterGenerator::stop_interpreter_at() { 2008 Label L; 2009 __ push(rscratch1); 2010 __ mov(rscratch1, (address) &BytecodeCounter::_counter_value); 2011 __ ldr(rscratch1, Address(rscratch1)); 2012 __ mov(rscratch2, StopInterpreterAt); 2013 __ cmpw(rscratch1, rscratch2); 2014 __ br(Assembler::NE, L); 2015 __ brk(0); 2016 __ bind(L); 2017 __ pop(rscratch1); 2018 } 2019 2020 #ifdef BUILTIN_SIM 2021 2022 #include <sys/mman.h> 2023 #include <unistd.h> 2024 2025 extern "C" { 2026 static int PAGESIZE = getpagesize(); 2027 int is_mapped_address(u_int64_t address) 2028 { 2029 address = (address & ~((u_int64_t)PAGESIZE - 1)); 2030 if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) { 2031 return true; 2032 } 2033 if (errno != ENOMEM) { 2034 return true; 2035 } 2036 return false; 2037 } 2038 2039 void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode) 2040 { 2041 if (method != 0) { 2042 method[0] = '\0'; 2043 } 2044 if (bcidx != 0) { 2045 *bcidx = -2; 2046 } 2047 if (decode != 0) { 2048 decode[0] = 0; 2049 } 2050 2051 if (framesize != 0) { 2052 *framesize = -1; 2053 } 2054 2055 if (Interpreter::contains((address)pc)) { 2056 AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck); 2057 Method* meth; 2058 address bcp; 2059 if (fp) { 2060 #define FRAME_SLOT_METHOD 3 2061 #define FRAME_SLOT_BCP 7 2062 meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3)); 2063 bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3)); 2064 #undef FRAME_SLOT_METHOD 2065 #undef FRAME_SLOT_BCP 2066 } else { 2067 meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0); 2068 bcp = (address)sim->getCPUState().xreg(RBCP, 0); 2069 } 2070 if (meth->is_native()) { 2071 return; 2072 } 2073 
if(method && meth->is_method()) { 2074 ResourceMark rm; 2075 method[0] = 'I'; 2076 method[1] = ' '; 2077 meth->name_and_sig_as_C_string(method + 2, 398); 2078 } 2079 if (bcidx) { 2080 if (meth->contains(bcp)) { 2081 *bcidx = meth->bci_from(bcp); 2082 } else { 2083 *bcidx = -2; 2084 } 2085 } 2086 if (decode) { 2087 if (!BytecodeTracer::closure()) { 2088 BytecodeTracer::set_closure(BytecodeTracer::std_closure()); 2089 } 2090 stringStream str(decode, 400); 2091 BytecodeTracer::trace(meth, bcp, &str); 2092 } 2093 } else { 2094 if (method) { 2095 CodeBlob *cb = CodeCache::find_blob((address)pc); 2096 if (cb != NULL) { 2097 if (cb->is_nmethod()) { 2098 ResourceMark rm; 2099 nmethod* nm = (nmethod*)cb; 2100 method[0] = 'C'; 2101 method[1] = ' '; 2102 nm->method()->name_and_sig_as_C_string(method + 2, 398); 2103 } else if (cb->is_adapter_blob()) { 2104 strcpy(method, "B adapter blob"); 2105 } else if (cb->is_runtime_stub()) { 2106 strcpy(method, "B runtime stub"); 2107 } else if (cb->is_exception_stub()) { 2108 strcpy(method, "B exception stub"); 2109 } else if (cb->is_deoptimization_stub()) { 2110 strcpy(method, "B deoptimization stub"); 2111 } else if (cb->is_safepoint_stub()) { 2112 strcpy(method, "B safepoint stub"); 2113 } else if (cb->is_uncommon_trap_stub()) { 2114 strcpy(method, "B uncommon trap stub"); 2115 } else if (cb->contains((address)StubRoutines::call_stub())) { 2116 strcpy(method, "B call stub"); 2117 } else { 2118 strcpy(method, "B unknown blob : "); 2119 strcat(method, cb->name()); 2120 } 2121 if (framesize != NULL) { 2122 *framesize = cb->frame_size(); 2123 } 2124 } 2125 } 2126 } 2127 } 2128 2129 2130 JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode) 2131 { 2132 bccheck1(pc, fp, method, bcidx, framesize, decode); 2133 } 2134 } 2135 2136 #endif // BUILTIN_SIM 2137 #endif // !PRODUCT