/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"


void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpw(rscratch1, T_INT);
  br(Assembler::EQ, done);

  // mask integer result to narrower return type.
  cmpw(rscratch1, T_BOOLEAN);
  br(Assembler::NE, notBool);
  andw(result, result, 0x1);
  b(done);

  bind(notBool);
  cmpw(rscratch1, T_BYTE);
  br(Assembler::NE, notByte);
  sbfx(result, result, 0, 8);
  b(done);

  bind(notByte);
  cmpw(rscratch1, T_CHAR);
  br(Assembler::NE, notChar);
  ubfx(result, result, 0, 16);  // truncate upper 16 bits
  b(done);

  bind(notChar);
  sbfx(result, result, 0, 16);  // sign-extend short

  // Nothing to do for T_INT
  bind(done);
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
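    // _popframe_condition is a word of flag bits: we proceed only when
    // popframe_pending_bit is set (the tbz below skips otherwise) and
    // popframe_processing_bit is clear (the tbnz skips otherwise).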
    ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
    tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    br(r0);
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
    case atos: ldr(r0, oop_addr);
               str(zr, oop_addr);
               verify_oop(r0, state);  break;
    case ltos: ldr(r0, val_addr);      break;
    case btos:  // fall through
    case ztos:  // fall through
    case ctos:  // fall through
    case stos:  // fall through
    case itos: ldrw(r0, val_addr);     break;
    case ftos: ldrs(v0, val_addr);     break;
    case dtos: ldrd(v0, val_addr);     break;
    case vtos: /* nothing to do */     break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  movw(rscratch1, (int) ilgl);
  strw(rscratch1, tos_addr);
  strw(zr, val_addr);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));
    cmpw(rscratch1, JvmtiThreadState::earlyret_pending);
    br(Assembler::NE, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
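    // That entry is selected by the TosState of the earlyret value, so we
    // reload the JVMTI thread state and pass its earlyret_tos word as the
    // argument.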
    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
    br(r0);
    bind(L);
  }
}

void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
  Register reg,
  int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  ldrh(reg, Address(rbcp, bcp_offset));
  rev16(reg, reg);
}

void InterpreterMacroAssembler::get_dispatch() {
  unsigned long offset;
  adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  lea(rdispatch, Address(rdispatch, offset));
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(rbcp, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    ldrw(index, Address(rbcp, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    eonw(index, index, zr);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Return
// Rindex: index into constant pool
// Rcache: address of cache entry - ConstantPoolCache::base_offset()
//
// A caller must add ConstantPoolCache::base_offset() to Rcache to get
// the true address of the cache entry.
//
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert_different_registers(cache, index);
  assert_different_registers(cache, rcpool);
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry
  // aarch64 already has the cache in rcpool so there is no need to
  // install it in cache. Instead we pre-add the indexed offset to
  // rcpool and return it in cache. All clients of this method need to
  // be modified accordingly.
  add(cache, rcpool, index, Assembler::LSL, 5);
}
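// Worked example for the scaling above: the assert guarantees that a
// ConstantPoolCacheEntry is 4 * wordSize bytes, i.e. 32 bytes on a 64-bit
// target, so field index i lands at byte offset i << 5 from rcpool --
// hence the LSL #5.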
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do so.
  // n.b. unlike x86, cache already includes the index offset
  lea(bytecode, Address(cache,
                        ConstantPoolCache::base_offset()
                        + ConstantPoolCacheEntry::indices_offset()));
  ldarw(bytecode, bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  ubfx(bytecode, bytecode, shift_count, BitsPerByte);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  lslw(index, index, LogBytesPerHeapOop);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(result, tmp);
  // Add in the index
  add(result, result, index);
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                             Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
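// Note on load_resolved_klass_at_offset above: the constant pool slot at
// cpool + (index << LogBytesPerWord) + sizeof(ConstantPool) holds a u2
// index into the _resolved_klasses array (hence the ldrh), and the final
// ldr pulls the Klass* out of that array's element block.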
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  profile_typecheck_failed(r2); // blows r2
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_i(Register r) {
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_l(Register r) {
  str(zr, pre(esp, -wordSize));
  str(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::pop_f(FloatRegister r) {
  ldrs(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_d(FloatRegister r) {
  ldrd(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_f(FloatRegister r) {
  strs(r, pre(esp, -wordSize));
}

void InterpreterMacroAssembler::push_d(FloatRegister r) {
  strd(r, pre(esp, 2 * -wordSize));
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();          break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: pop_i();            break;
  case ltos: pop_l();            break;
  case ftos: pop_f();            break;
  case dtos: pop_d();            break;
  case vtos: /* nothing to do */ break;
  default:   ShouldNotReachHere();
  }
  verify_oop(r0, state);
}

void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(r0, state);
  switch (state) {
  case atos: push_ptr();         break;
  case btos:
  case ztos:
  case ctos:
  case stos:
  case itos: push_i();           break;
  case ltos: push_l();           break;
  case ftos: push_f();           break;
  case dtos: push_d();           break;
  case vtos: /* nothing to do */ break;
  default  : ShouldNotReachHere();
  }
}

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::load_float(Address src) {
  ldrs(v0, src);
}

void InterpreterMacroAssembler::load_double(Address src) {
  ldrd(v0, src);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  mov(r13, sp);
  // record last_sp
  str(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}
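// A note on the expression stack helpers above: esp grows down one word
// per slot, and category-2 values (long/double) occupy two slots. push_l
// stores zr into the high slot before the value, so the stack always
// holds well-defined words, and pop_l/pop_d discard both slots at once.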
// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(rscratch1, run_compiled_code);
    ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    br(rscratch1);
    bind(run_compiled_code);
  }

  ldr(rscratch1, Address(method, Method::from_interpreted_offset()));
  br(rscratch1);
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.  amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop,
                                              bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Unimplemented();
  }
  if (verifyoop) {
    verify_oop(r0, state);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  bool needs_thread_local_poll = generate_poll &&
    SafepointMechanism::uses_thread_local_poll() && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(rscratch2, Address(rthread, Thread::polling_page_offset()));
    tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

  if (table == Interpreter::dispatch_table(state)) {
    addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));
    ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));
  } else {
    mov(rscratch2, (address)table);
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
  }
  br(rscratch2);

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(rscratch2, ExternalAddress((address)safepoint_table));
    ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));
    br(rscratch2);
  }
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode
  ldrb(rscratch1, Address(pre(rbcp, step)));
  dispatch_base(state, Interpreter::dispatch_table(state), /*verifyoop*/true, generate_poll);
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  ldrb(rscratch1, Address(rbcp, 0));
  dispatch_base(state, table);
}
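// Dispatch in a nutshell (an informal sketch of the code above): rscratch1
// holds the bytecode, and each dispatch table is an array of code
// addresses indexed by it, so the fast path is essentially
//   target = table[bytecode];   // Address::uxtw(3) scales by 8 bytes
//   br(target);
// with an optional detour through the safepoint table when the
// thread-local poll bit is set.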
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers r3 xmm0 may be in use for the
  // result check if synchronized method
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into r3
  const Address do_not_unlock_if_synchronized(rthread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  ldrb(r3, do_not_unlock_if_synchronized);
  strb(zr, do_not_unlock_if_synchronized); // reset the flag

  // get method access flags
  ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(r2, Address(r1, Method::access_flags_offset()));
  tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cbnz(r3, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(c_rarg1, monitor); // address of first monitor

  ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
  cbnz(r0, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception.  Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check for block-structured locking (i.e., that all locked
  // objects have been unlocked)
  bind(unlocked);

  // r0: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rfp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    ldr(c_rarg1, monitor_block_top); // points to current entry, starting
                                     // with top-most entry
    lea(r19, monitor_block_bot);     // points to word before bottom of
                                     // monitor block
    b(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                   throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      b(restart);
    }

    bind(loop);
    // check if current entry is used
    ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
    cbnz(rscratch1, exception);

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(esp,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(esp, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP.  If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
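  // Note that sp must stay 16-byte aligned as required by AAPCS64, so we
  // mask esp down rather than moving it into sp directly.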
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = r0;
    const Register tmp = c_rarg2;
    const Register obj_reg = c_rarg3; // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
    }

    // Load (object->mark() | 1) into swap_reg
    ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
    str(swap_reg, Address(lock_reg, mark_offset));

    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");

    Label fail;
    if (PrintBiasedLockingStatistics) {
      Label fast;
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
      bind(fast);
      atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
                  rscratch2, rscratch1, tmp);
      b(done);
      bind(fail);
    } else {
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
    }

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 7) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (7 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 3 bits clear.
    // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
    // NOTE2: aarch64 does not like to subtract sp from rn so take a
    // copy
    mov(rscratch1, sp);
    sub(swap_reg, swap_reg, rscratch1);
    ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size()));

    // Save the test result, for recursive case, the result is zero
    str(swap_reg, Address(lock_reg, mark_offset));

    if (PrintBiasedLockingStatistics) {
      br(Assembler::NE, slow_case);
      atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
                  rscratch2, rscratch1, tmp);
    }
    br(Assembler::EQ, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}
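// Worked example for the stack-lock test in lock_object, assuming a 4K
// page (os::vm_page_size() == 0x1000): the mask 7 - 0x1000 sign-extends
// to 0x...fffff007, so ((mark - sp) & mask) == 0 exactly when mark - sp
// is 8-byte aligned and 0 <= mark - sp < 0x1000, i.e. the displaced mark
// is a nearby address in our own stack: the recursive-lock case, which
// stores zero as its result above.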
// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg)
{
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg   = r0;
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure. Store the BasicLock address into %r0
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    ldr(header_reg, Address(swap_reg,
                            BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    cbz(header_reg, done);

    // Atomic swap back the old header
    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);

    // Call the runtime routine for slow case.
    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);

    bind(done);

    restore_bcp();
  }
}

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  cbz(mdp, zero_continue);
}

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));

  // Test MDO to avoid the call if it is NULL.
  ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
  cbz(r0, set_mdp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
  // r0: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
  lea(r1, Address(r1, in_bytes(MethodData::data_offset())));
  add(r0, r1, r0);
  str(r0, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
  bind(set_mdp);
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  stp(r0, r1, Address(pre(sp, -2 * wordSize)));
  stp(r2, r3, Address(pre(sp, -2 * wordSize)));
  test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue
  get_method(r1);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
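  // Rebuild the expected bcp from the mdp: read the bci recorded in the
  // DataLayout header, add it to the start of the method's bytecodes
  // (ConstMethod::codes_offset()), and compare the result with rbcp.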
  ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset())));
  ldr(rscratch1, Address(r1, Method::const_offset()));
  add(r2, r2, rscratch1, Assembler::LSL);
  lea(r2, Address(r2, ConstMethod::codes_offset()));
  cmp(r2, rbcp);
  br(Assembler::EQ, verify_continue);
  // r1: method
  // rbcp: bcp // rbcp == 22
  // r3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               r1, rbcp, r3);
  bind(verify_continue);
  ldp(r2, r3, Address(post(sp, 2 * wordSize)));
  ldp(r0, r1, Address(post(sp, 2 * wordSize)));
#endif // ASSERT
}


void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  str(value, data);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  increment_mdp_data_at(mdp_in, noreg, constant, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64bit counters at best it is wasting space
  // at worst it is a rare bug when counters overflow

  assert_different_registers(rscratch2, rscratch1, mdp_in, reg);

  Address addr1(mdp_in, constant);
  Address addr2(rscratch2, reg, Address::lsl(0));
  Address &addr = addr1;
  if (reg != noreg) {
    lea(rscratch2, addr1);
    addr = addr2;
  }

  if (decrement) {
    // Decrement the register.  Set condition codes.
    // Intel does this
    // addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    // Label L;
    // jcc(Assembler::negative, L);
    // addptr(data, (int32_t) DataLayout::counter_increment);
    // so we do this
    ldr(rscratch1, addr);
    subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment);
    Label L;
    br(Assembler::LO, L);       // skip store if counter underflow
    str(rscratch1, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this
    // Increment the register.  Set carry flag.
    // addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    // sbbptr(data, (int32_t)0);
    // so we do this
    ldr(rscratch1, addr);
    adds(rscratch1, rscratch1, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  }
}

void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  ldr(rscratch1, Address(mdp_in, header_offset));
  orr(rscratch1, rscratch1, header_bits);
  str(rscratch1, Address(mdp_in, header_offset));
}


void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ldr(rscratch1, Address(mdp_in, offset));
    cmp(value, rscratch1);
  } else {
    // Put the test value into a register, so caller can use it:
    ldr(test_value_out, Address(mdp_in, offset));
    cmp(value, test_value_out);
  }
  br(Assembler::NE, not_equal_continue);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(rscratch1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  lea(rscratch1, Address(mdp_in, offset_of_disp));
  ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0)));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, (unsigned)constant);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
}


void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // save/restore across call_VM
  stp(zr, return_bci, Address(pre(sp, -2 * wordSize)));
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  ldp(zr, return_bci, Address(post(sp, 2 * wordSize)));
}
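// All of the update_mdp_by_* helpers above follow one protocol: advance
// the in-register mdp past the ProfileData of the current bytecode --
// by a compile-time constant, or by a displacement fetched from the data
// itself -- then write it back to the frame's mdp slot so the next
// profiled bytecode continues from there.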
void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      cbnz(receiver, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
      b(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else // INCLUDE_JVMCI
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
#endif // INCLUDE_JVMCI
    bind(profile_continue);
  }
}

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
  assert_different_registers(method, mdp, reg2);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done;
    record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows.  At the same time, it remembers
// the location of the first empty row.  (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree.  Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                        Register reg2, int start_row, Label& done, int total_rows,
                                        OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
                                        int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(mdp, item_offset, item,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the item from the CallData.)

    // The item is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(mdp, count_offset);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          cbz(reg2, found_null);
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, non_profiled_offset);
          b(done);
          bind(found_null);
        } else {
          cbnz(reg2, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed the item[start_row] is NULL.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(mdp, item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    b(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[0]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
// done:

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind (done);
}

void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movw(reg2, in_bytes(MultiBranchData::per_case_size()));
    movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::maddw(index, index, reg2, rscratch1);

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, c_rarg1);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
  }

}


void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // template interpreter will leave the result on the top of the stack.
    push(state);
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbz(r3, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    pop(state);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    push(state);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, c_rarg1);
    pop(state);
  }
}
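// A hedged usage sketch for increment_mask_and_jump below (its real call
// sites live in the template interpreter generator; the register and
// label names here are illustrative only):
//   // bump the invocation counter, branch when the masked value hits zero
//   increment_mask_and_jump(invocation_counter_addr, increment, mask_addr,
//                           rscratch1, rscratch2, /*preloaded*/false,
//                           Assembler::EQ, &counter_overflow);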
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask,
                                                        Register scratch, Register scratch2,
                                                        bool preloaded, Condition cond,
                                                        Label* where) {
  if (!preloaded) {
    ldrw(scratch, counter_addr);
  }
  add(scratch, scratch, increment);
  strw(scratch, counter_addr);
  ldrw(scratch2, mask);
  ands(scratch, scratch, scratch2);
  br(cond, *where);
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore rbcp & rlocals pointer since these
  //       are callee saved registers and no blocking/ GC can happen
  //       in leaf calls.
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  // assert(java_thread == noreg , "not expecting a precomputed java thread");
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  assert_different_registers(obj, rscratch1);
  Label update, next, none;

  verify_oop(obj);

  cbnz(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  b(next);

  bind(update);
  load_klass(obj, obj);

  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next); // klass seen before, nothing to
                           // do. The unknown bit may have been
                           // set already but no need to check.

  tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
  // already unknown. Nothing to do anymore.

  ldr(rscratch1, mdo_addr);
  cbz(rscratch1, none);
  cmp(rscratch1, TypeEntries::null_seen);
  br(Assembler::EQ, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next);

  // different than before. Cannot keep accurate profile.
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  assert_different_registers(obj, rscratch1);
  Label update, next, none;

  verify_oop(obj);

  cbnz(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  b(next);

  bind(update);
  load_klass(obj, obj);

  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next); // klass seen before, nothing to
                           // do. The unknown bit may have been
                           // set already but no need to check.

  tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
  // already unknown. Nothing to do anymore.

  ldr(rscratch1, mdo_addr);
  cbz(rscratch1, none);
  cmp(rscratch1, TypeEntries::null_seen);
  br(Assembler::EQ, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next);

  // different from before. Cannot keep an accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  b(next);

  bind(none);
  // first time here. Set profile type.
  str(obj, mdo_addr);

  bind(next);
}
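// Record the types of the arguments at a call site into its
// CallTypeData/VirtualCallTypeData, and leave mdp just past the type
// cells. Note the slot arithmetic used below: a recorded stack offset
// o, counted from the start of the argument list, maps to offset
// n - o - 1 from the end of the list when there are n argument slots
// (e.g. with n == 3, o == 0 selects the slot 2 away from the end).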
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    cmp(rscratch1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    br(Assembler::NE, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
          sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
          cmp(tmp, TypeStackSlotEntries::per_arg_count());
          add(rscratch1, mdp, off_to_args);
          br(Assembler::LT, done);
        }
        ldr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
        sub(tmp, tmp, rscratch1);
        sub(tmp, tmp, 1);
        Address arg_addr = argument_address(tmp);
        ldr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return value to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
      cmp(rscratch1, vmIntrinsics::_compiledLambdaForm);
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}
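// Record the types of a method's parameters in the ParametersTypeData
// of its MDO. The data is laid out as an array of (stack slot, type)
// cell pairs; the loop below visits the last pair first and walks
// backwards. Each parameter value is fetched by negating its recorded
// slot offset and scaling by Interpreter::logStackElementSize to index
// down from rlocals.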
void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(rscratch1, mdp, off_base);
    add(rscratch2, mdp, type_base);

    Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
    Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));

    // load offset on the stack from the slot for this parameter
    ldr(tmp2, arg_off);
    neg(tmp2, tmp2);
    // read the parameter from the local area
    ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
    br(Assembler::GE, loop);

    bind(profile_continue);
  }
}
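// Note on the loop above: the index tmp1 starts at
// array_len - per_arg_count and the subs/br(GE) pair keeps iterating
// while it is still non-negative, so entries are visited from the
// last parameter down to the first.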