/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciEnv.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

// Implementation of AddressLiteral

void AddressLiteral::set_rspec(relocInfo::relocType rtype) {
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(_target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(_target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Initially added to the Assembler interface as a pure virtual:
//   RegisterConstant delayed_value(..)
// for:
//   6812678 macro assembler needs delayed binding of a few constants (for 6655638)
// this was subsequently modified to its present name and return type
RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  ShouldNotReachHere();
  return RegisterOrConstant(-1);
}


#ifdef AARCH64
// Note: ARM32 version is OS dependent
void MacroAssembler::breakpoint(AsmCondition cond) {
  if (cond == al) {
    brk();
  } else {
    Label L;
    b(L, inverse(cond));
    brk();
    bind(L);
  }
}
#endif // AARCH64


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           Register vtable_index,
                                           Register method_result) {
  const int base_offset = in_bytes(Klass::vtable_start_offset()) + vtableEntry::method_offset_in_bytes();
  assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  add(recv_klass, recv_klass, AsmOperand(vtable_index, lsl, LogBytesPerWord));
  ldr(method_result, Address(recv_klass, base_offset));
}


// Simplified, combined version, good for typical uses.
// Falls through on failure.
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Register temp_reg2,
                                         Register temp_reg3,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, temp_reg2, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, temp_reg2, temp_reg3, &L_success, NULL);
  bind(L_failure);
};

void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp_reg2,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path) {

  assert_different_registers(sub_klass, super_klass, temp_reg, temp_reg2, noreg);
  const Register super_check_offset = temp_reg2;

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  b(*L_success, eq);

  // Check the supertype display:
  ldr_u32(super_check_offset, super_check_offset_addr);

  Address super_check_addr(sub_klass, super_check_offset);
  ldr(temp_reg, super_check_addr);
  cmp(super_klass, temp_reg); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  b(*L_success, eq);
  cmp_32(super_check_offset, sc_offset);
  if (L_failure == &L_fallthrough) {
    b(*L_slow_path, eq);
  } else {
    b(*L_failure, ne);
    if (L_slow_path != &L_fallthrough) {
      b(*L_slow_path);
    }
  }

  bind(L_fallthrough);
}


void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Register temp3_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
#ifdef AARCH64
  NOT_IMPLEMENTED();
#else
  // Note: if used by code that expects a register to be 0 on success,
  // this register must be temp_reg and set_cond_codes must be true

  Register saved_reg = noreg;

  // get additional tmp registers
  if (temp3_reg == noreg) {
    saved_reg = temp3_reg = LR;
    push(saved_reg);
  }

  assert(temp2_reg != noreg, "need all the temporary registers");
  assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg);

  Register cmp_temp = temp_reg;
  Register scan_temp = temp3_reg;
  Register count_temp = temp2_reg;

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(sub_klass, sc_offset);

#ifndef PRODUCT
  inc_counter((address)&SharedRuntime::_partial_subtype_ctr, scan_temp, count_temp);
#endif

  // We will consult the secondary-super array.
  ldr(scan_temp, Address(sub_klass, ss_offset));

  assert(! UseCompressedOops, "search_key must be the compressed super_klass");
  // else search_key is the
  Register search_key = super_klass;

  // Load the array length.
  ldr(count_temp, Address(scan_temp, Array<Klass*>::length_offset_in_bytes()));
  add(scan_temp, scan_temp, Array<Klass*>::base_offset_in_bytes());

  add(count_temp, count_temp, 1);

  Label L_loop, L_fail;

  // Top of search loop
  bind(L_loop);
  // Notes:
  //  scan_temp starts at the array elements
  //  count_temp is 1+size
  subs(count_temp, count_temp, 1);
  if ((L_failure != &L_fallthrough) && (! set_cond_codes) && (saved_reg == noreg)) {
    // direct jump to L_failure if failed and no cleanup needed
    b(*L_failure, eq); // not found and
  } else {
    b(L_fail, eq); // not found in the array
  }

  // Load next super to check
  // In the array of super classes elements are pointer sized.
  int element_size = wordSize;
  ldr(cmp_temp, Address(scan_temp, element_size, post_indexed));

  // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
  subs(cmp_temp, cmp_temp, search_key);

  // A miss means we are NOT a subtype and need to keep looping
  b(L_loop, ne);

  // Falling out the bottom means we found a hit; we ARE a subtype

  // Note: temp_reg/cmp_temp is already 0 and flag Z is set

  // Success.  Cache the super we found and proceed in triumph.
  str(super_klass, Address(sub_klass, sc_offset));

  if (saved_reg != noreg) {
    // Return success
    pop(saved_reg);
  }

  b(*L_success);

  bind(L_fail);
  // Note1: check "b(*L_failure, eq)" above if adding extra instructions here
  if (set_cond_codes) {
    movs(temp_reg, sub_klass); // clears Z and sets temp_reg to non-0 if needed
  }
  if (saved_reg != noreg) {
    pop(saved_reg);
  }
  if (L_failure != &L_fallthrough) {
    b(*L_failure);
  }

  bind(L_fallthrough);
#endif
}

// Returns address of receiver parameter, using tmp as base register. tmp and params_count can be the same.
Address MacroAssembler::receiver_argument_address(Register params_base, Register params_count, Register tmp) {
  assert_different_registers(params_base, params_count);
  add(tmp, params_base, AsmOperand(params_count, lsl, Interpreter::logStackElementSize));
  return Address(tmp, -Interpreter::stackElementSize);
}


void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) {
    nop();
  }
}

int MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                        Register last_java_fp,
                                        bool save_last_java_pc,
                                        Register tmp) {
  int pc_offset;
  if (last_java_fp != noreg) {
    // optional
    str(last_java_fp, Address(Rthread, JavaThread::last_Java_fp_offset()));
    _fp_saved = true;
  } else {
    _fp_saved = false;
  }
  if (AARCH64_ONLY(true) NOT_AARCH64(save_last_java_pc)) { // optional on 32-bit ARM
#ifdef AARCH64
    pc_offset = mov_pc_to(tmp);
    str(tmp, Address(Rthread, JavaThread::last_Java_pc_offset()));
#else
    str(PC, Address(Rthread, JavaThread::last_Java_pc_offset()));
    pc_offset = offset() + VM_Version::stored_pc_adjustment();
#endif
    _pc_saved = true;
  } else {
    _pc_saved = false;
    pc_offset = -1;
  }
  // According to the comment in javaFrameAnchor, SP must be saved last, so that other
  // entries are valid when SP is set.

  // However, this is probably not a strong constraint since for instance PC is
  // sometimes read from the stack at SP... but is pushed later (by the call). Hence,
  // we now write the fields in the expected order but we have not added a StoreStore
  // barrier.

  // XXX: if the ordering is really important, PC should always be saved (without forgetting
  // to update oop_map offsets) and a StoreStore barrier might be needed.

  if (last_java_sp == noreg) {
    last_java_sp = SP; // always saved
  }
#ifdef AARCH64
  if (last_java_sp == SP) {
    mov(tmp, SP);
    str(tmp, Address(Rthread, JavaThread::last_Java_sp_offset()));
  } else {
    str(last_java_sp, Address(Rthread, JavaThread::last_Java_sp_offset()));
  }
#else
  str(last_java_sp, Address(Rthread, JavaThread::last_Java_sp_offset()));
#endif

  return pc_offset; // for oopmaps
}

void MacroAssembler::reset_last_Java_frame(Register tmp) {
  const Register Rzero = zero_register(tmp);
  str(Rzero, Address(Rthread, JavaThread::last_Java_sp_offset()));
  if (_fp_saved) {
    str(Rzero, Address(Rthread, JavaThread::last_Java_fp_offset()));
  }
  if (_pc_saved) {
    str(Rzero, Address(Rthread, JavaThread::last_Java_pc_offset()));
  }
}


// Implementation of call_VM versions

void MacroAssembler::call_VM_leaf_helper(address entry_point, int number_of_arguments) {
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(number_of_arguments <= 4, "cannot have more than 4 arguments");

#ifndef AARCH64
  // Safer to save R9 here since callers may have been written
  // assuming R9 survives. This is suboptimal but is not worth
  // optimizing for the few platforms where R9 is scratched.
  push(RegisterSet(R4) | R9ifScratched);
  mov(R4, SP);
  bic(SP, SP, StackAlignmentInBytes - 1);
#endif // AARCH64
  call(entry_point, relocInfo::runtime_call_type);
#ifndef AARCH64
  mov(SP, R4);
  pop(RegisterSet(R4) | R9ifScratched);
#endif // AARCH64
}


void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(number_of_arguments <= 3, "cannot have more than 3 arguments");

  const Register tmp = Rtemp;
  assert_different_registers(oop_result, tmp);

  set_last_Java_frame(SP, FP, true, tmp);

#ifdef ASSERT
  AARCH64_ONLY(if (UseCompressedOops || UseCompressedClassPointers) { verify_heapbase("call_VM_helper: heap base corrupted?"); });
#endif // ASSERT

#ifndef AARCH64
#if R9_IS_SCRATCHED
  // Safer to save R9 here since callers may have been written
  // assuming R9 survives. This is suboptimal but is not worth
  // optimizing for the few platforms where R9 is scratched.

  // Note: cannot save R9 above the saved SP (some calls expect for
  // instance the Java stack top at the saved SP)
  // => once saved (with set_last_Java_frame), decrease SP before rounding to
  // ensure the slot at SP will be free for R9).
  sub(SP, SP, 4);
  bic(SP, SP, StackAlignmentInBytes - 1);
  str(R9, Address(SP, 0));
#else
  bic(SP, SP, StackAlignmentInBytes - 1);
#endif // R9_IS_SCRATCHED
#endif

  mov(R0, Rthread);
  call(entry_point, relocInfo::runtime_call_type);

#ifndef AARCH64
#if R9_IS_SCRATCHED
  ldr(R9, Address(SP, 0));
#endif
  ldr(SP, Address(Rthread, JavaThread::last_Java_sp_offset()));
#endif

  reset_last_Java_frame(tmp);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe();
  check_and_handle_earlyret();

  if (check_exceptions) {
    // check for pending exceptions
    ldr(tmp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    cbz(tmp, L);
    mov_pc_to(Rexception_pc);
    b(StubRoutines::forward_exception_entry());
    bind(L);
#else
    cmp(tmp, 0);
    mov(Rexception_pc, PC, ne);
    b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, tmp);
  }
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  assert (arg_1 == R1, "fixed register for arg_1");
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  assert (arg_1 == R1, "fixed register for arg_1");
  assert (arg_2 == R2, "fixed register for arg_2");
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  assert (arg_1 == R1, "fixed register for arg_1");
  assert (arg_2 == R2, "fixed register for arg_2");
  assert (arg_3 == R3, "fixed register for arg_3");
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Not used on ARM
  Unimplemented();
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // Not used on ARM
  Unimplemented();
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // Not used on ARM
  Unimplemented();
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // Not used on ARM
  Unimplemented();
}

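// Illustrative usage sketch (not part of this file): the call_VM variants above
// rely on the caller having already placed the Java-level arguments in their
// fixed registers (R1..R3); the asserts only check that convention. A call site
// might look roughly like:
//   __ mov(R1, Robj);                                             // arg_1 must be in R1
//   __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry), R1);
// where "InterpreterRuntime::some_entry" and Robj are hypothetical names used
// only for this sketch.
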
// Raw call, without saving/restoring registers, exception handling, etc.
// Mainly used from various stubs.
void MacroAssembler::call_VM(address entry_point, bool save_R9_if_scratched) {
  const Register tmp = Rtemp; // Rtemp free since scratched by call
  set_last_Java_frame(SP, FP, true, tmp);
#if R9_IS_SCRATCHED
  if (save_R9_if_scratched) {
    // Note: Saving also R10 for alignment.
    push(RegisterSet(R9, R10));
  }
#endif
  mov(R0, Rthread);
  call(entry_point, relocInfo::runtime_call_type);
#if R9_IS_SCRATCHED
  if (save_R9_if_scratched) {
    pop(RegisterSet(R9, R10));
  }
#endif
  reset_last_Java_frame(tmp);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
  call_VM_leaf_helper(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  assert (arg_1 == R0, "fixed register for arg_1");
  call_VM_leaf_helper(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  assert (arg_1 == R0, "fixed register for arg_1");
  assert (arg_2 == R1, "fixed register for arg_2");
  call_VM_leaf_helper(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  assert (arg_1 == R0, "fixed register for arg_1");
  assert (arg_2 == R1, "fixed register for arg_2");
  assert (arg_3 == R2, "fixed register for arg_3");
  call_VM_leaf_helper(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4) {
  assert (arg_1 == R0, "fixed register for arg_1");
  assert (arg_2 == R1, "fixed register for arg_2");
  assert (arg_3 == R2, "fixed register for arg_3");
  assert (arg_4 == R3, "fixed register for arg_4");
  call_VM_leaf_helper(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register tmp) {
  assert_different_registers(oop_result, tmp);
  ldr(oop_result, Address(Rthread, JavaThread::vm_result_offset()));
  str(zero_register(tmp), Address(Rthread, JavaThread::vm_result_offset()));
  verify_oop(oop_result);
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register tmp) {
  assert_different_registers(metadata_result, tmp);
  ldr(metadata_result, Address(Rthread, JavaThread::vm_result_2_offset()));
  str(zero_register(tmp), Address(Rthread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::add_rc(Register dst, Register arg1, RegisterOrConstant arg2) {
  if (arg2.is_register()) {
    add(dst, arg1, arg2.as_register());
  } else {
    add(dst, arg1, arg2.as_constant());
  }
}

void MacroAssembler::add_slow(Register rd, Register rn, int c) {
#ifdef AARCH64
  if (c == 0) {
    if (rd != rn) {
      mov(rd, rn);
    }
    return;
  }
  if (c < 0) {
    sub_slow(rd, rn, -c);
    return;
  }
  if (c > right_n_bits(24)) {
    guarantee(rd != rn, "no large add_slow with only one register");
    mov_slow(rd, c);
    add(rd, rn, rd);
  } else {
    int lo = c & right_n_bits(12);
    int hi = (c >> 12) & right_n_bits(12);
    if (lo != 0) {
      add(rd, rn, lo, lsl0);
    }
    if (hi != 0) {
      add(rd, (lo == 0) ? rn : rd, hi, lsl12);
    }
  }
}
#else
  // This function is used in the compiler for handling large frame offsets
  if ((c < 0) && (((-c) & ~0x3fc) == 0)) {
    return sub(rd, rn, (-c));
  }
  int low = c & 0x3fc;
  if (low != 0) {
    add(rd, rn, low);
    rn = rd;
  }
  if (c & ~0x3fc) {
    assert(AsmOperand::is_rotated_imm(c & ~0x3fc), "unsupported add_slow offset %d", c);
    add(rd, rn, c & ~0x3fc);
  } else if (rd != rn) {
    assert(c == 0, "");
    mov(rd, rn); // need to generate at least one move!
  }
#endif // AARCH64
}

void MacroAssembler::sub_slow(Register rd, Register rn, int c) {
#ifdef AARCH64
  if (c <= 0) {
    add_slow(rd, rn, -c);
    return;
  }
  if (c > right_n_bits(24)) {
    guarantee(rd != rn, "no large sub_slow with only one register");
    mov_slow(rd, c);
    sub(rd, rn, rd);
  } else {
    int lo = c & right_n_bits(12);
    int hi = (c >> 12) & right_n_bits(12);
    if (lo != 0) {
      sub(rd, rn, lo, lsl0);
    }
    if (hi != 0) {
      sub(rd, (lo == 0) ? rn : rd, hi, lsl12);
    }
  }
#else
  // This function is used in the compiler for handling large frame offsets
  if ((c < 0) && (((-c) & ~0x3fc) == 0)) {
    return add(rd, rn, (-c));
  }
  int low = c & 0x3fc;
  if (low != 0) {
    sub(rd, rn, low);
    rn = rd;
  }
  if (c & ~0x3fc) {
    assert(AsmOperand::is_rotated_imm(c & ~0x3fc), "unsupported sub_slow offset %d", c);
    sub(rd, rn, c & ~0x3fc);
  } else if (rd != rn) {
    assert(c == 0, "");
    mov(rd, rn); // need to generate at least one move!
  }
#endif // AARCH64
}

void MacroAssembler::mov_slow(Register rd, address addr) {
  // do *not* call the non relocated mov_related_address
  mov_slow(rd, (intptr_t)addr);
}

void MacroAssembler::mov_slow(Register rd, const char *str) {
  mov_slow(rd, (intptr_t)str);
}

#ifdef AARCH64

// Common code for mov_slow and instr_count_for_mov_slow.
// Returns number of instructions of mov_slow pattern,
// generating it if non-null MacroAssembler is given.
int MacroAssembler::mov_slow_helper(Register rd, intptr_t c, MacroAssembler* masm) {
  // This code pattern is matched in NativeInstruction::is_mov_slow.
  // Update it at modifications.

  const intx mask = right_n_bits(16);
  // 1 movz instruction
  for (int base_shift = 0; base_shift < 64; base_shift += 16) {
    if ((c & ~(mask << base_shift)) == 0) {
      if (masm != NULL) {
        masm->movz(rd, ((uintx)c) >> base_shift, base_shift);
      }
      return 1;
    }
  }
  // 1 movn instruction
  for (int base_shift = 0; base_shift < 64; base_shift += 16) {
    if (((~c) & ~(mask << base_shift)) == 0) {
      if (masm != NULL) {
        masm->movn(rd, ((uintx)(~c)) >> base_shift, base_shift);
      }
      return 1;
    }
  }
  // 1 orr instruction
  {
    LogicalImmediate imm(c, false);
    if (imm.is_encoded()) {
      if (masm != NULL) {
        masm->orr(rd, ZR, imm);
      }
      return 1;
    }
  }
  // 1 movz/movn + up to 3 movk instructions
  int zeroes = 0;
  int ones = 0;
  for (int base_shift = 0; base_shift < 64; base_shift += 16) {
    int part = (c >> base_shift) & mask;
    if (part == 0) {
      ++zeroes;
    } else if (part == mask) {
      ++ones;
    }
  }
  int def_bits = 0;
  if (ones > zeroes) {
    def_bits = mask;
  }
  int inst_count = 0;
  for (int base_shift = 0; base_shift < 64; base_shift += 16) {
    int part = (c >> base_shift) & mask;
    if (part != def_bits) {
      if (masm != NULL) {
        if (inst_count > 0) {
          masm->movk(rd, part, base_shift);
        } else {
          if (def_bits == 0) {
            masm->movz(rd, part, base_shift);
          } else {
            masm->movn(rd, ~part & mask, base_shift);
          }
        }
      }
      inst_count++;
    }
  }
  assert((1 <= inst_count) && (inst_count <= 4), "incorrect number of instructions");
  return inst_count;
}

void MacroAssembler::mov_slow(Register rd, intptr_t c) {
#ifdef ASSERT
  int off = offset();
#endif
  (void) mov_slow_helper(rd, c, this);
  assert(offset() - off == instr_count_for_mov_slow(c) * InstructionSize, "size mismatch");
}

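// Worked example (illustration only, not generated code): for a constant such
// as 0x0000123400005678 none of the single-instruction forms above applies, so
// mov_slow_helper falls through to the movz/movk path. Two of the four 16-bit
// halfwords are zero, so def_bits stays 0 and the emitted sequence would be
// roughly:
//   movz rd, #0x5678, lsl #0
//   movk rd, #0x1234, lsl #32
// i.e. instr_count_for_mov_slow() would return 2 for this value.
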
// Counts instructions generated by mov_slow(rd, c).
int MacroAssembler::instr_count_for_mov_slow(intptr_t c) {
  return mov_slow_helper(noreg, c, NULL);
}

int MacroAssembler::instr_count_for_mov_slow(address c) {
  return mov_slow_helper(noreg, (intptr_t)c, NULL);
}

#else

void MacroAssembler::mov_slow(Register rd, intptr_t c, AsmCondition cond) {
  if (AsmOperand::is_rotated_imm(c)) {
    mov(rd, c, cond);
  } else if (AsmOperand::is_rotated_imm(~c)) {
    mvn(rd, ~c, cond);
  } else if (VM_Version::supports_movw()) {
    movw(rd, c & 0xffff, cond);
    if ((unsigned int)c >> 16) {
      movt(rd, (unsigned int)c >> 16, cond);
    }
  } else {
    // Find first non-zero bit
    int shift = 0;
    while ((c & (3 << shift)) == 0) {
      shift += 2;
    }
    // Put the least significant part of the constant
    int mask = 0xff << shift;
    mov(rd, c & mask, cond);
    // Add up to 3 other parts of the constant;
    // each of them can be represented as rotated_imm
    if (c & (mask << 8)) {
      orr(rd, rd, c & (mask << 8), cond);
    }
    if (c & (mask << 16)) {
      orr(rd, rd, c & (mask << 16), cond);
    }
    if (c & (mask << 24)) {
      orr(rd, rd, c & (mask << 24), cond);
    }
  }
}

#endif // AARCH64

void MacroAssembler::mov_oop(Register rd, jobject o, int oop_index,
#ifdef AARCH64
                             bool patchable
#else
                             AsmCondition cond
#endif
                             ) {

  if (o == NULL) {
#ifdef AARCH64
    if (patchable) {
      nop();
    }
    mov(rd, ZR);
#else
    mov(rd, 0, cond);
#endif
    return;
  }

  if (oop_index == 0) {
    oop_index = oop_recorder()->allocate_oop_index(o);
  }
  relocate(oop_Relocation::spec(oop_index));

#ifdef AARCH64
  if (patchable) {
    nop();
  }
  ldr(rd, pc());
#else
  if (VM_Version::supports_movw()) {
    movw(rd, 0, cond);
    movt(rd, 0, cond);
  } else {
    ldr(rd, Address(PC), cond);
    // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
    nop();
  }
#endif
}

void MacroAssembler::mov_metadata(Register rd, Metadata* o, int metadata_index AARCH64_ONLY_ARG(bool patchable)) {
  if (o == NULL) {
#ifdef AARCH64
    if (patchable) {
      nop();
    }
#endif
    mov(rd, 0);
    return;
  }

  if (metadata_index == 0) {
    metadata_index = oop_recorder()->allocate_metadata_index(o);
  }
  relocate(metadata_Relocation::spec(metadata_index));

#ifdef AARCH64
  if (patchable) {
    nop();
  }
#ifdef COMPILER2
  if (!patchable && VM_Version::prefer_moves_over_load_literal()) {
    mov_slow(rd, (address)o);
    return;
  }
#endif
  ldr(rd, pc());
#else
  if (VM_Version::supports_movw()) {
    movw(rd, ((int)o) & 0xffff);
    movt(rd, (unsigned int)o >> 16);
  } else {
    ldr(rd, Address(PC));
    // Extra nop to handle case of large offset of metadata placeholder (see NativeMovConstReg::set_data).
    nop();
  }
#endif // AARCH64
}

void MacroAssembler::mov_float(FloatRegister fd, jfloat c NOT_AARCH64_ARG(AsmCondition cond)) {
  Label skip_constant;
  union {
    jfloat f;
    jint i;
  } accessor;
  accessor.f = c;

#ifdef AARCH64
  // TODO-AARCH64 - try to optimize loading of float constants with fmov and/or mov_slow
  Label L;
  ldr_s(fd, target(L));
  b(skip_constant);
  bind(L);
  emit_int32(accessor.i);
  bind(skip_constant);
#else
  flds(fd, Address(PC), cond);
  b(skip_constant);
  emit_int32(accessor.i);
  bind(skip_constant);
#endif // AARCH64
}

void MacroAssembler::mov_double(FloatRegister fd, jdouble c NOT_AARCH64_ARG(AsmCondition cond)) {
  Label skip_constant;
  union {
    jdouble d;
    jint i[2];
  } accessor;
  accessor.d = c;

#ifdef AARCH64
  // TODO-AARCH64 - try to optimize loading of double constants with fmov
  Label L;
  ldr_d(fd, target(L));
  b(skip_constant);
  align(wordSize);
  bind(L);
  emit_int32(accessor.i[0]);
  emit_int32(accessor.i[1]);
  bind(skip_constant);
#else
  fldd(fd, Address(PC), cond);
  b(skip_constant);
  emit_int32(accessor.i[0]);
  emit_int32(accessor.i[1]);
  bind(skip_constant);
#endif // AARCH64
}

void MacroAssembler::ldr_global_s32(Register reg, address address_of_global) {
  intptr_t addr = (intptr_t) address_of_global;
#ifdef AARCH64
  assert((addr & 0x3) == 0, "address should be aligned");

  // FIXME: TODO
  if (false && page_reachable_from_cache(address_of_global)) {
    assert(false,"TODO: relocate");
    //relocate();
    adrp(reg, address_of_global);
    ldrsw(reg, Address(reg, addr & 0xfff));
  } else {
    mov_slow(reg, addr & ~0x3fff);
    ldrsw(reg, Address(reg, addr & 0x3fff));
  }
#else
  mov_slow(reg, addr & ~0xfff);
  ldr(reg, Address(reg, addr & 0xfff));
#endif
}

void MacroAssembler::ldr_global_ptr(Register reg, address address_of_global) {
#ifdef AARCH64
  intptr_t addr = (intptr_t) address_of_global;
  assert ((addr & 0x7) == 0, "address should be aligned");
  mov_slow(reg, addr & ~0x7fff);
  ldr(reg, Address(reg, addr & 0x7fff));
#else
  ldr_global_s32(reg, address_of_global);
#endif
}

void MacroAssembler::ldrb_global(Register reg, address address_of_global) {
  intptr_t addr = (intptr_t) address_of_global;
  mov_slow(reg, addr & ~0xfff);
  ldrb(reg, Address(reg, addr & 0xfff));
}

void MacroAssembler::zero_extend(Register rd, Register rn, int bits) {
#ifdef AARCH64
  switch (bits) {
    case  8: uxtb(rd, rn); break;
    case 16: uxth(rd, rn); break;
    case 32: mov_w(rd, rn); break;
    default: ShouldNotReachHere();
  }
#else
  if (bits <= 8) {
    andr(rd, rn, (1 << bits) - 1);
  } else if (bits >= 24) {
    bic(rd, rn, -1 << bits);
  } else {
    mov(rd, AsmOperand(rn, lsl, 32 - bits));
    mov(rd, AsmOperand(rd, lsr, 32 - bits));
  }
#endif
}

void MacroAssembler::sign_extend(Register rd, Register rn, int bits) {
#ifdef AARCH64
  switch (bits) {
    case  8: sxtb(rd, rn); break;
    case 16: sxth(rd, rn); break;
    case 32: sxtw(rd, rn); break;
    default: ShouldNotReachHere();
  }
#else
  mov(rd, AsmOperand(rn, lsl, 32 - bits));
  mov(rd, AsmOperand(rd, asr, 32 - bits));
#endif
}

#ifndef AARCH64

void MacroAssembler::long_move(Register rd_lo, Register rd_hi,
                               Register rn_lo, Register rn_hi,
                               AsmCondition cond) {
  if (rd_lo != rn_hi) {
    if (rd_lo != rn_lo) { mov(rd_lo, rn_lo, cond); }
    if (rd_hi != rn_hi) { mov(rd_hi, rn_hi, cond); }
  } else if (rd_hi != rn_lo) {
    if (rd_hi != rn_hi) { mov(rd_hi, rn_hi, cond); }
    if (rd_lo != rn_lo) { mov(rd_lo, rn_lo, cond); }
  } else {
    eor(rd_lo, rd_hi, rd_lo, cond);
    eor(rd_hi, rd_lo, rd_hi, cond);
    eor(rd_lo, rd_hi, rd_lo, cond);
  }
}

void MacroAssembler::long_shift(Register rd_lo, Register rd_hi,
                                Register rn_lo, Register rn_hi,
                                AsmShift shift, Register count) {
  Register tmp;
  if (rd_lo != rn_lo && rd_lo != rn_hi && rd_lo != count) {
    tmp = rd_lo;
  } else {
    tmp = rd_hi;
  }
  assert_different_registers(tmp, count, rn_lo, rn_hi);

  subs(tmp, count, 32);
  if (shift == lsl) {
    assert_different_registers(rd_hi, rn_lo);
    assert_different_registers(count, rd_hi);
    mov(rd_hi, AsmOperand(rn_lo, shift, tmp), pl);
    rsb(tmp, count, 32, mi);
    if (rd_hi == rn_hi) {
      mov(rd_hi, AsmOperand(rn_hi, lsl, count), mi);
      orr(rd_hi, rd_hi, AsmOperand(rn_lo, lsr, tmp), mi);
    } else {
      mov(rd_hi, AsmOperand(rn_lo, lsr, tmp), mi);
      orr(rd_hi, rd_hi, AsmOperand(rn_hi, lsl, count), mi);
    }
    mov(rd_lo, AsmOperand(rn_lo, shift, count));
  } else {
    assert_different_registers(rd_lo, rn_hi);
    assert_different_registers(rd_lo, count);
    mov(rd_lo, AsmOperand(rn_hi, shift, tmp), pl);
    rsb(tmp, count, 32, mi);
    if (rd_lo == rn_lo) {
      mov(rd_lo, AsmOperand(rn_lo, lsr, count), mi);
      orr(rd_lo, rd_lo, AsmOperand(rn_hi, lsl, tmp), mi);
    } else {
      mov(rd_lo, AsmOperand(rn_hi, lsl, tmp), mi);
      orr(rd_lo, rd_lo, AsmOperand(rn_lo, lsr, count), mi);
    }
    mov(rd_hi, AsmOperand(rn_hi, shift, count));
  }
}

void MacroAssembler::long_shift(Register rd_lo, Register rd_hi,
                                Register rn_lo, Register rn_hi,
                                AsmShift shift, int count) {
  assert(count != 0 && (count & ~63) == 0, "must be");

  if (shift == lsl) {
    assert_different_registers(rd_hi, rn_lo);
    if (count >= 32) {
      mov(rd_hi, AsmOperand(rn_lo, lsl, count - 32));
      mov(rd_lo, 0);
    } else {
      mov(rd_hi, AsmOperand(rn_hi, lsl, count));
      orr(rd_hi, rd_hi, AsmOperand(rn_lo, lsr, 32 - count));
      mov(rd_lo, AsmOperand(rn_lo, lsl, count));
    }
  } else {
    assert_different_registers(rd_lo, rn_hi);
    if (count >= 32) {
      if (count == 32) {
        mov(rd_lo, rn_hi);
      } else {
        mov(rd_lo, AsmOperand(rn_hi, shift, count - 32));
      }
      if (shift == asr) {
        mov(rd_hi, AsmOperand(rn_hi, asr, 0));
      } else {
        mov(rd_hi, 0);
      }
    } else {
      mov(rd_lo, AsmOperand(rn_lo, lsr, count));
      orr(rd_lo, rd_lo, AsmOperand(rn_hi, lsl, 32 - count));
      mov(rd_hi, AsmOperand(rn_hi, shift, count));
    }
  }
}
#endif // !AARCH64

void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) {
  // This code pattern is matched in NativeInstruction::skip_verify_oop.
  // Update it at modifications.
  if (!VerifyOops) return;

  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif
  const char* msg_buffer = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", s, offset(), file, line);
    msg_buffer = code_string(ss.as_string());
  }

  save_all_registers();

  if (reg != R2) {
    mov(R2, reg);  // oop to verify
  }
  mov(R1, SP);     // register save area

  Label done;
  InlinedString Lmsg(msg_buffer);
  ldr_literal(R0, Lmsg); // message

  // call indirectly to solve generation ordering problem
  ldr_global_ptr(Rtemp, StubRoutines::verify_oop_subroutine_entry_address());
  call(Rtemp);

  restore_all_registers();

  b(done);
#ifdef COMPILER2
  int off = offset();
#endif
  bind_literal(Lmsg);
#ifdef COMPILER2
  if (offset() - off == 1 * wordSize) {
    // no padding, so insert nop for worst-case sizing
    nop();
  }
#endif
  bind(done);
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) {
  if (!VerifyOops) return;

  const char* msg_buffer = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    if ((addr.base() == SP) && (addr.index() == noreg)) {
      ss.print("verify_oop_addr SP[%d]: %s", (int)addr.disp(), s);
    } else {
      ss.print("verify_oop_addr: %s", s);
    }
    ss.print(" (%s:%d)", file, line);
    msg_buffer = code_string(ss.as_string());
  }

  int push_size = save_all_registers();

  if (addr.base() == SP) {
    // computes an addr that takes into account the push
    if (addr.index() != noreg) {
      Register new_base = addr.index() == R2 ? R1 : R2; // avoid corrupting the index
      add(new_base, SP, push_size);
      addr = addr.rebase(new_base);
    } else {
      addr = addr.plus_disp(push_size);
    }
  }

  ldr(R2, addr); // oop to verify
  mov(R1, SP);   // register save area

  Label done;
  InlinedString Lmsg(msg_buffer);
  ldr_literal(R0, Lmsg); // message

  // call indirectly to solve generation ordering problem
  ldr_global_ptr(Rtemp, StubRoutines::verify_oop_subroutine_entry_address());
  call(Rtemp);

  restore_all_registers();

  b(done);
  bind_literal(Lmsg);
  bind(done);
}

void MacroAssembler::null_check(Register reg, Register tmp, int offset) {
  if (needs_explicit_null_check(offset)) {
#ifdef AARCH64
    ldr(ZR, Address(reg));
#else
    assert_different_registers(reg, tmp);
    if (tmp == noreg) {
      tmp = Rtemp;
      assert((! Thread::current()->is_Compiler_thread()) ||
             (! (ciEnv::current()->task() == NULL)) ||
             (! (ciEnv::current()->comp_level() == CompLevel_full_optimization)),
             "Rtemp not available in C2"); // explicit tmp register required
      // XXX: could we mark the code buffer as not compatible with C2 ?
    }
    ldr(tmp, Address(reg));
#endif
  }
}

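// Illustrative usage sketch (not from this file): callers typically pair the
// inline allocation fast paths below with an out-of-line slow case, roughly:
//   Label slow_case;
//   __ tlab_allocate(Robj, Robj_end, Rtemp, obj_size_in_bytes, slow_case);
//   ... initialize mark word, klass and fields of the new object ...
//   __ bind(slow_case);
//   ... call the runtime allocation entry instead ...
// where Robj, Robj_end and obj_size_in_bytes are hypothetical names chosen
// only for this sketch.
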
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void MacroAssembler::eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                   RegisterOrConstant size_expression, Label& slow_case) {
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    b(slow_case);
    return;
  }

  CollectedHeap* ch = Universe::heap();

  const Register top_addr = tmp1;
  const Register heap_end = tmp2;

  if (size_expression.is_register()) {
    assert_different_registers(obj, obj_end, top_addr, heap_end, size_expression.as_register());
  } else {
    assert_different_registers(obj, obj_end, top_addr, heap_end);
  }

  bool load_const = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw()); // TODO-AARCH64 check performance
  if (load_const) {
    mov_address(top_addr, (address)Universe::heap()->top_addr(), symbolic_Relocation::eden_top_reference);
  } else {
    ldr(top_addr, Address(Rthread, JavaThread::heap_top_addr_offset()));
  }
  // Calculate new heap_top by adding the size of the object
  Label retry;
  bind(retry);

#ifdef AARCH64
  ldxr(obj, top_addr);
#else
  ldr(obj, Address(top_addr));
#endif // AARCH64

  ldr(heap_end, Address(top_addr, (intptr_t)ch->end_addr() - (intptr_t)ch->top_addr()));
  add_rc(obj_end, obj, size_expression);
  // Check if obj_end wrapped around, i.e., obj_end < obj. If yes, jump to the slow case.
  cmp(obj_end, obj);
  b(slow_case, lo);
  // Update heap_top if allocation succeeded
  cmp(obj_end, heap_end);
  b(slow_case, hi);

#ifdef AARCH64
  stxr(heap_end/*scratched*/, obj_end, top_addr);
  cbnz_w(heap_end, retry);
#else
  atomic_cas_bool(obj, obj_end, top_addr, 0, heap_end/*scratched*/);
  b(retry, ne);
#endif // AARCH64
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void MacroAssembler::tlab_allocate(Register obj, Register obj_end, Register tmp1,
                                   RegisterOrConstant size_expression, Label& slow_case) {
  const Register tlab_end = tmp1;
  assert_different_registers(obj, obj_end, tlab_end);

  ldr(obj, Address(Rthread, JavaThread::tlab_top_offset()));
  ldr(tlab_end, Address(Rthread, JavaThread::tlab_end_offset()));
  add_rc(obj_end, obj, size_expression);
  cmp(obj_end, tlab_end);
  b(slow_case, hi);
  str(obj_end, Address(Rthread, JavaThread::tlab_top_offset()));
}

// Fills memory regions [start..end] with zeroes. Clobbers `start` and `tmp` registers.
void MacroAssembler::zero_memory(Register start, Register end, Register tmp) {
  Label loop;
  const Register ptr = start;

#ifdef AARCH64
  // TODO-AARCH64 - compare performance of 2x word zeroing with simple 1x
  const Register size = tmp;
  Label remaining, done;

  sub(size, end, start);

#ifdef ASSERT
  { Label L;
    tst(size, wordSize - 1);
    b(L, eq);
    stop("size is not a multiple of wordSize");
    bind(L);
  }
#endif // ASSERT

  subs(size, size, wordSize);
  b(remaining, le);

  // Zero by 2 words per iteration.
  bind(loop);
  subs(size, size, 2*wordSize);
  stp(ZR, ZR, Address(ptr, 2*wordSize, post_indexed));
  b(loop, gt);

  bind(remaining);
  b(done, ne);
  str(ZR, Address(ptr));
  bind(done);
#else
  mov(tmp, 0);
  bind(loop);
  cmp(ptr, end);
  str(tmp, Address(ptr, wordSize, post_indexed), lo);
  b(loop, lo);
#endif // AARCH64
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp) {
#ifdef AARCH64
  ldr(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  add_rc(tmp, tmp, size_in_bytes);
  str(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
#else
  // Bump total bytes allocated by this thread
  Label done;

  // Borrow the Rthread for alloc counter
  Register Ralloc = Rthread;
  add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
  ldr(tmp, Address(Ralloc));
  adds(tmp, tmp, size_in_bytes);
  str(tmp, Address(Ralloc), cc);
  b(done, cc);

  // Increment the high word and store single-copy atomically (an unlikely scenario on typical
  // embedded systems, as it means >4GB has been allocated).
  // To do so, ldrd/strd instructions are used, which require an even-odd pair of registers.
  // Such a request could be difficult to satisfy by allocating those registers on a higher
  // level, therefore the routine is ready to allocate a pair itself.
  Register low, high;
  // Select either R0/R1 or R2/R3

  if (size_in_bytes.is_register() && (size_in_bytes.as_register() == R0 || size_in_bytes.as_register() == R1)) {
    low = R2;
    high = R3;
  } else {
    low = R0;
    high = R1;
  }
  push(RegisterSet(low, high));

  ldrd(low, Address(Ralloc));
  adds(low, low, size_in_bytes);
  adc(high, high, 0);
  strd(low, Address(Ralloc));

  pop(RegisterSet(low, high));

  bind(done);

  // Unborrow the Rthread
  sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
#endif // AARCH64
}

void MacroAssembler::arm_stack_overflow_check(int frame_size_in_bytes, Register tmp) {
  // Version of AbstractAssembler::generate_stack_overflow_check optimized for ARM
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();

    sub_slow(tmp, SP, JavaThread::stack_shadow_zone_size());
    strb(R0, Address(tmp));
#ifdef AARCH64
    for (; frame_size_in_bytes >= page_size; frame_size_in_bytes -= page_size) {
      sub(tmp, tmp, page_size);
      strb(R0, Address(tmp));
    }
#else
    for (; frame_size_in_bytes >= page_size; frame_size_in_bytes -= 0xff0) {
      strb(R0, Address(tmp, -0xff0, pre_indexed));
    }
#endif // AARCH64
  }
}

void MacroAssembler::arm_stack_overflow_check(Register Rsize, Register tmp) {
  if (UseStackBanging) {
    Label loop;

    mov(tmp, SP);
    add_slow(Rsize, Rsize, JavaThread::stack_shadow_zone_size() - os::vm_page_size());
#ifdef AARCH64
    sub(tmp, tmp, Rsize);
    bind(loop);
    subs(Rsize, Rsize, os::vm_page_size());
    strb(ZR, Address(tmp, Rsize));
#else
    bind(loop);
    subs(Rsize, Rsize, 0xff0);
    strb(R0, Address(tmp, -0xff0, pre_indexed));
#endif // AARCH64
    b(loop, hi);
  }
}

void MacroAssembler::stop(const char* msg) {
  // This code pattern is matched in NativeInstruction::is_stop.
  // Update it at modifications.
#ifdef COMPILER1
  if (CommentedAssembly) {
    block_comment("stop");
  }
#endif

  InlinedAddress Ldebug(CAST_FROM_FN_PTR(address, MacroAssembler::debug));
  InlinedString Lmsg(msg);

  // save all registers for further inspection
  save_all_registers();

  ldr_literal(R0, Lmsg); // message
  mov(R1, SP);           // register save area

#ifdef AARCH64
  ldr_literal(Rtemp, Ldebug);
  br(Rtemp);
#else
  ldr_literal(PC, Ldebug); // call MacroAssembler::debug
#endif // AARCH64

#if defined(COMPILER2) && defined(AARCH64)
  int off = offset();
#endif
  bind_literal(Lmsg);
  bind_literal(Ldebug);
#if defined(COMPILER2) && defined(AARCH64)
  if (offset() - off == 2 * wordSize) {
    // no padding, so insert nop for worst-case sizing
    nop();
  }
#endif
}

void MacroAssembler::warn(const char* msg) {
#ifdef COMPILER1
  if (CommentedAssembly) {
    block_comment("warn");
  }
#endif

  InlinedAddress Lwarn(CAST_FROM_FN_PTR(address, warning));
  InlinedString Lmsg(msg);
  Label done;

  int push_size = save_caller_save_registers();

#ifdef AARCH64
  // TODO-AARCH64 - get rid of extra debug parameters
  mov(R1, LR);
  mov(R2, FP);
  add(R3, SP, push_size);
#endif

  ldr_literal(R0, Lmsg);  // message
  ldr_literal(LR, Lwarn); // call warning

  call(LR);

  restore_caller_save_registers();

  b(done);
  bind_literal(Lmsg);
  bind_literal(Lwarn);
  bind(done);
}


int MacroAssembler::save_all_registers() {
  // This code pattern is matched in NativeInstruction::is_save_all_registers.
  // Update it at modifications.
#ifdef AARCH64
  const Register tmp = Rtemp;
  raw_push(R30, ZR);
  for (int i = 28; i >= 0; i -= 2) {
    raw_push(as_Register(i), as_Register(i+1));
  }
  mov_pc_to(tmp);
  str(tmp, Address(SP, 31*wordSize));
  ldr(tmp, Address(SP, tmp->encoding()*wordSize));
  return 32*wordSize;
#else
  push(RegisterSet(R0, R12) | RegisterSet(LR) | RegisterSet(PC));
  return 15*wordSize;
#endif // AARCH64
}

void MacroAssembler::restore_all_registers() {
#ifdef AARCH64
  for (int i = 0; i <= 28; i += 2) {
    raw_pop(as_Register(i), as_Register(i+1));
  }
  raw_pop(R30, ZR);
#else
  pop(RegisterSet(R0, R12) | RegisterSet(LR)); // restore registers
  add(SP, SP, wordSize);                       // discard saved PC
#endif // AARCH64
}

int MacroAssembler::save_caller_save_registers() {
#ifdef AARCH64
  for (int i = 0; i <= 16; i += 2) {
    raw_push(as_Register(i), as_Register(i+1));
  }
  raw_push(R18, LR);
  return 20*wordSize;
#else
#if R9_IS_SCRATCHED
  // Save also R10 to preserve alignment
  push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR) | RegisterSet(R9, R10));
  return 8*wordSize;
#else
  push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));
  return 6*wordSize;
#endif
#endif // AARCH64
}

void MacroAssembler::restore_caller_save_registers() {
#ifdef AARCH64
  raw_pop(R18, LR);
  for (int i = 16; i >= 0; i -= 2) {
    raw_pop(as_Register(i), as_Register(i+1));
  }
#else
#if R9_IS_SCRATCHED
  pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR) | RegisterSet(R9, R10));
#else
  pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));
#endif
#endif // AARCH64
}

void MacroAssembler::debug(const char* msg, const intx* registers) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  thread->set_thread_state(_thread_in_vm);

  if (ShowMessageBoxOnError) {
    ttyLocker ttyl;
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      BytecodeCounter::print();
    }
    if (os::message_box(msg, "Execution stopped, print registers?")) {
#ifdef AARCH64
      // saved registers: R0-R30, PC
      const int nregs = 32;
#else
      // saved registers: R0-R12, LR, PC
      const int nregs = 15;
      const Register regs[nregs] = {R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, PC};
#endif // AARCH64

      for (int i = 0; i < nregs AARCH64_ONLY(-1); i++) {
        tty->print_cr("%s = " INTPTR_FORMAT, AARCH64_ONLY(as_Register(i)) NOT_AARCH64(regs[i])->name(), registers[i]);
      }

#ifdef AARCH64
      tty->print_cr("pc = " INTPTR_FORMAT, registers[nregs-1]);
#endif // AARCH64

      // derive original SP value from the address of register save area
      tty->print_cr("%s = " INTPTR_FORMAT, SP->name(), p2i(&registers[nregs]));
    }
    BREAKPOINT;
  } else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
  fatal("%s", msg); // returning from MacroAssembler::debug is not supported
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}


// Implementation of FixedSizeCodeBlock

FixedSizeCodeBlock::FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled) :
_masm(masm), _start(masm->pc()), _size_in_instrs(size_in_instrs), _enabled(enabled) {
}

FixedSizeCodeBlock::~FixedSizeCodeBlock() {
  if (_enabled) {
    address curr_pc = _masm->pc();

    assert(_start < curr_pc, "invalid current pc");
    guarantee(curr_pc <= _start + _size_in_instrs * Assembler::InstructionSize, "code block is too long");

    int nops_count = (_start - curr_pc) / Assembler::InstructionSize + _size_in_instrs;
    for (int i = 0; i < nops_count; i++) {
      _masm->nop();
    }
  }
}

#ifdef AARCH64

// Serializes memory.
// tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
void MacroAssembler::membar(Membar_mask_bits order_constraint, Register tmp) {
  // TODO-AARCH64 investigate dsb vs dmb effects
  if (order_constraint == StoreStore) {
    dmb(DMB_st);
  } else if ((order_constraint & ~(LoadLoad | LoadStore)) == 0) {
    dmb(DMB_ld);
  } else {
    dmb(DMB_all);
  }
}

#else

// Serializes memory. Potentially blows flags and reg.
// tmp is a scratch for v6 co-processor write op (could be noreg for other architecture versions)
// preserve_flags takes a longer path in LoadStore case (dmb rather than control dependency) to preserve status flags. Optional.
// load_tgt is an ordered load target in a LoadStore case only, to create dependency between the load operation and conditional branch. Optional.
void MacroAssembler::membar(Membar_mask_bits order_constraint,
                            Register tmp,
                            bool preserve_flags,
                            Register load_tgt) {

  if (order_constraint == StoreStore) {
    dmb(DMB_st, tmp);
  } else if ((order_constraint & StoreLoad)  ||
             (order_constraint & LoadLoad)   ||
             (order_constraint & StoreStore) ||
             (load_tgt == noreg)             ||
             preserve_flags) {
    dmb(DMB_all, tmp);
  } else {
    // LoadStore: speculative stores reordering is prohibited

    // By providing an ordered load target register, we avoid an extra memory load reference
    Label not_taken;
    bind(not_taken);
    cmp(load_tgt, load_tgt);
    b(not_taken, ne);
  }
}

#endif // AARCH64

// If "allow_fallthrough_on_failure" is false, we always branch to "slow_case"
// on failure, so fall-through can only mean success.
// "one_shot" controls whether we loop and retry to mitigate spurious failures.
// This is only needed for C2, which for some reason does not retry,
// while C1/interpreter does.
// TODO: measure if it makes a difference

void MacroAssembler::cas_for_lock_acquire(Register oldval, Register newval,
                                          Register base, Register tmp, Label &slow_case,
                                          bool allow_fallthrough_on_failure, bool one_shot)
{

  bool fallthrough_is_success = false;

  // ARM Litmus Test example does prefetching here.
  // TODO: investigate if it helps performance

  // The last store was to the displaced header, so to prevent
  // reordering we must issue a StoreStore or Release barrier before
  // the CAS store.

#ifdef AARCH64

  Register Rscratch = tmp;
  Register Roop = base;
  Register mark = oldval;
  Register Rbox = newval;
  Label loop;

  assert(oopDesc::mark_offset_in_bytes() == 0, "must be");

  // Instead of StoreStore here, we use store-release-exclusive below

  bind(loop);

  ldaxr(tmp, base);  // acquire
  cmp(tmp, oldval);
  b(slow_case, ne);
  stlxr(tmp, newval, base); // release
  if (one_shot) {
    cmp_w(tmp, 0);
  } else {
    cbnz_w(tmp, loop);
    fallthrough_is_success = true;
  }

  // MemBarAcquireLock would normally go here, but
  // we already do ldaxr+stlxr above, which has
  // Sequential Consistency

#else
  membar(MacroAssembler::StoreStore, noreg);

  if (one_shot) {
    ldrex(tmp, Address(base, oopDesc::mark_offset_in_bytes()));
    cmp(tmp, oldval);
    strex(tmp, newval, Address(base, oopDesc::mark_offset_in_bytes()), eq);
    cmp(tmp, 0, eq);
  } else {
    atomic_cas_bool(oldval, newval, base, oopDesc::mark_offset_in_bytes(), tmp);
  }

  // MemBarAcquireLock barrier
  // According to JSR-133 Cookbook, this should be LoadLoad | LoadStore,
  // but that doesn't prevent a load or store from floating up between
  // the load and store in the CAS sequence, so play it safe and
  // do a full fence.
  membar(Membar_mask_bits(LoadLoad | LoadStore | StoreStore | StoreLoad), noreg);
#endif
  if (!fallthrough_is_success && !allow_fallthrough_on_failure) {
    b(slow_case, ne);
  }
}

void MacroAssembler::cas_for_lock_release(Register oldval, Register newval,
                                          Register base, Register tmp, Label &slow_case,
                                          bool allow_fallthrough_on_failure, bool one_shot)
{

  bool fallthrough_is_success = false;

  assert_different_registers(oldval, newval, base, tmp);

#ifdef AARCH64
  Label loop;

  assert(oopDesc::mark_offset_in_bytes() == 0, "must be");

  bind(loop);
  ldxr(tmp, base);
  cmp(tmp, oldval);
  b(slow_case, ne);
  // MemBarReleaseLock barrier
  stlxr(tmp, newval, base);
  if (one_shot) {
    cmp_w(tmp, 0);
  } else {
    cbnz_w(tmp, loop);
    fallthrough_is_success = true;
  }
#else
  // MemBarReleaseLock barrier
  // According to JSR-133 Cookbook, this should be StoreStore | LoadStore,
  // but that doesn't prevent a load or store from floating down between
  // the load and store in the CAS sequence, so play it safe and
  // do a full fence.
  membar(Membar_mask_bits(LoadLoad | LoadStore | StoreStore | StoreLoad), tmp);

  if (one_shot) {
    ldrex(tmp, Address(base, oopDesc::mark_offset_in_bytes()));
    cmp(tmp, oldval);
    strex(tmp, newval, Address(base, oopDesc::mark_offset_in_bytes()), eq);
    cmp(tmp, 0, eq);
  } else {
    atomic_cas_bool(oldval, newval, base, oopDesc::mark_offset_in_bytes(), tmp);
  }
#endif
  if (!fallthrough_is_success && !allow_fallthrough_on_failure) {
    b(slow_case, ne);
  }

  // ExitEnter
  // According to JSR-133 Cookbook, this should be StoreLoad, the same
  // barrier that follows volatile store.
  // TODO: Should be able to remove on armv8 if volatile loads
  // use the load-acquire instruction.
  membar(StoreLoad, noreg);
}

#ifndef PRODUCT

// Preserves flags and all registers.
// On SMP the updated value might not be visible to external observers without a synchronization barrier
void MacroAssembler::cond_atomic_inc32(AsmCondition cond, int* counter_addr) {
  if (counter_addr != NULL) {
    InlinedAddress counter_addr_literal((address)counter_addr);
    Label done, retry;
    if (cond != al) {
      b(done, inverse(cond));
    }

#ifdef AARCH64
    raw_push(R0, R1);
    raw_push(R2, ZR);

    ldr_literal(R0, counter_addr_literal);

    bind(retry);
    ldxr_w(R1, R0);
    add_w(R1, R1, 1);
    stxr_w(R2, R1, R0);
    cbnz_w(R2, retry);

    raw_pop(R2, ZR);
    raw_pop(R0, R1);
#else
    push(RegisterSet(R0, R3) | RegisterSet(Rtemp));
    ldr_literal(R0, counter_addr_literal);

    mrs(CPSR, Rtemp);

    bind(retry);
    ldr_s32(R1, Address(R0));
    add(R2, R1, 1);
    atomic_cas_bool(R1, R2, R0, 0, R3);
    b(retry, ne);

    msr(CPSR_fsxc, Rtemp);

    pop(RegisterSet(R0, R3) | RegisterSet(Rtemp));
#endif // AARCH64

    b(done);
    bind_literal(counter_addr_literal);

    bind(done);
  }
}

#endif // !PRODUCT


// Building block for CAS cases of biased locking: makes CAS and records statistics.
// The slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
void MacroAssembler::biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                                   Register tmp, Label& slow_case, int* counter_addr) {

  cas_for_lock_acquire(old_mark_reg, new_mark_reg, obj_reg, tmp, slow_case);
#ifdef ASSERT
  breakpoint(ne); // Fallthrough only on success
#endif
#ifndef PRODUCT
  if (counter_addr != NULL) {
    cond_atomic_inc32(al, counter_addr);
  }
#endif // !PRODUCT
}

int MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Register tmp2,
                                         Label& done, Label& slow_case,
                                         BiasedLockingCounters* counters) {
  // obj_reg must be preserved (at least) if biased locking fails
  // tmp_reg is a temporary register
  // swap_reg was used as a temporary but contained a value
  //   that was used afterwards in some call paths. Callers
  //   have been fixed so that swap_reg no longer needs to be
  //   saved.
  // Rtemp is no longer scratched

  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(obj_reg, swap_reg, tmp_reg, tmp2);
  guarantee(swap_reg != tmp_reg, "invariant");
  assert(tmp_reg != noreg, "must supply tmp_reg");

#ifndef PRODUCT
  if (PrintBiasedLockingStatistics && (counters == NULL)) {
    counters = BiasedLocking::counters();
  }
#endif

  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;

  // The null check applies to the mark load, if we need to perform it.
  // If the mark has already been loaded into swap_reg then the null check
  // has already been performed and the offset is irrelevant.
  int null_check_offset = offset();
  if (!swap_reg_contains_mark) {
    ldr(swap_reg, mark_addr);
  }

  // On MP platforms loads could return 'stale' values in some cases.
  // That is acceptable since either the CAS or the slow-case path is taken in the worst case.

  andr(tmp_reg, swap_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);

  b(cas_label, ne);

  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
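  // The computation below forms ((prototype_header | Rthread) ^ mark) and then
  // clears the age bits; a zero result (eq) means both the bias owner and the
  // epoch match, i.e. the lock is already biased toward the current thread.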
  load_klass(tmp_reg, obj_reg);
  ldr(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
  orr(tmp_reg, tmp_reg, Rthread);
  eor(tmp_reg, tmp_reg, swap_reg);

#ifdef AARCH64
  ands(tmp_reg, tmp_reg, ~((uintx) markOopDesc::age_mask_in_place));
#else
  bics(tmp_reg, tmp_reg, ((int) markOopDesc::age_mask_in_place));
#endif // AARCH64

#ifndef PRODUCT
  if (counters != NULL) {
    cond_atomic_inc32(eq, counters->biased_lock_entry_count_addr());
  }
#endif // !PRODUCT

  b(done, eq);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  tst(tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
  b(try_revoke_bias, ne);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  tst(tmp_reg, (uintx)markOopDesc::epoch_mask_in_place);
  b(try_rebias, ne);

  // tmp_reg has the age, epoch and pattern bits cleared
  // The remaining (owner) bits are (Thread ^ current_owner)

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go into the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.

  // Note that we know the owner is not ourselves. Hence, success can
  // only happen when the owner bits are 0

#ifdef AARCH64
  // The bit mask biased_lock + age + epoch is not a valid AArch64 logical immediate,
  // as it has a cleared bit in the middle (the cms bit), so it is loaded with a
  // separate instruction.
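  // For reference, the biased mark word layout assumed by the masks below
  // (low bits first): [lock:2 | biased_lock:1 | age:4 | epoch:2 | thread ...].
  // Keeping only those low bits yields the presumed unbiased header, i.e. the
  // thread (owner) bits are cleared before OR-ing in Rthread.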
2015 mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place)); 2016 andr(swap_reg, swap_reg, tmp2); 2017 #else 2018 // until the assembler can be made smarter, we need to make some assumptions about the values 2019 // so we can optimize this: 2020 assert((markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed"); 2021 2022 mov(swap_reg, AsmOperand(swap_reg, lsl, 23)); 2023 mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markOop with thread bits cleared (for CAS) 2024 #endif // AARCH64 2025 2026 orr(tmp_reg, swap_reg, Rthread); // new mark 2027 2028 biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, slow_case, 2029 (counters != NULL) ? counters->anonymously_biased_lock_entry_count_addr() : NULL); 2030 2031 // If the biasing toward our thread failed, this means that 2032 // another thread succeeded in biasing it toward itself and we 2033 // need to revoke that bias. The revocation will occur in the 2034 // interpreter runtime in the slow case. 2035 2036 b(done); 2037 2038 bind(try_rebias); 2039 2040 // At this point we know the epoch has expired, meaning that the 2041 // current "bias owner", if any, is actually invalid. Under these 2042 // circumstances _only_, we are allowed to use the current header's 2043 // value as the comparison value when doing the cas to acquire the 2044 // bias in the current epoch. In other words, we allow transfer of 2045 // the bias from one thread to another directly in this situation. 2046 2047 // tmp_reg low (not owner) bits are (age: 0 | pattern&epoch: prototype^swap_reg) 2048 2049 eor(tmp_reg, tmp_reg, swap_reg); // OK except for owner bits (age preserved !) 2050 2051 // owner bits 'random'. Set them to Rthread. 2052 #ifdef AARCH64 2053 mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place)); 2054 andr(tmp_reg, tmp_reg, tmp2); 2055 #else 2056 mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23)); 2057 mov(tmp_reg, AsmOperand(tmp_reg, lsr, 23)); 2058 #endif // AARCH64 2059 2060 orr(tmp_reg, tmp_reg, Rthread); // new mark 2061 2062 biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, slow_case, 2063 (counters != NULL) ? counters->rebiased_lock_entry_count_addr() : NULL); 2064 2065 // If the biasing toward our thread failed, then another thread 2066 // succeeded in biasing it toward itself and we need to revoke that 2067 // bias. The revocation will occur in the runtime in the slow case. 2068 2069 b(done); 2070 2071 bind(try_revoke_bias); 2072 2073 // The prototype mark in the klass doesn't have the bias bit set any 2074 // more, indicating that objects of this data type are not supposed 2075 // to be biased any more. We are going to try to reset the mark of 2076 // this object to the prototype value and fall through to the 2077 // CAS-based locking scheme. Note that if our CAS fails, it means 2078 // that another thread raced us for the privilege of revoking the 2079 // bias of this particular object, so it's okay to continue in the 2080 // normal locking code. 2081 2082 // tmp_reg low (not owner) bits are (age: 0 | pattern&epoch: prototype^swap_reg) 2083 2084 eor(tmp_reg, tmp_reg, swap_reg); // OK except for owner bits (age preserved !) 2085 2086 // owner bits 'random'. 
Clear them 2087 #ifdef AARCH64 2088 mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place)); 2089 andr(tmp_reg, tmp_reg, tmp2); 2090 #else 2091 mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23)); 2092 mov(tmp_reg, AsmOperand(tmp_reg, lsr, 23)); 2093 #endif // AARCH64 2094 2095 biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, cas_label, 2096 (counters != NULL) ? counters->revoked_lock_entry_count_addr() : NULL); 2097 2098 // Fall through to the normal CAS-based lock, because no matter what 2099 // the result of the above CAS, some thread must have succeeded in 2100 // removing the bias bit from the object's header. 2101 2102 bind(cas_label); 2103 2104 return null_check_offset; 2105 } 2106 2107 2108 void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done) { 2109 assert(UseBiasedLocking, "why call this otherwise?"); 2110 2111 // Check for biased locking unlock case, which is a no-op 2112 // Note: we do not have to check the thread ID for two reasons. 2113 // First, the interpreter checks for IllegalMonitorStateException at 2114 // a higher level. Second, if the bias was revoked while we held the 2115 // lock, the object could not be rebiased toward another thread, so 2116 // the bias bit would be clear. 2117 ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 2118 2119 andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place); 2120 cmp(tmp_reg, markOopDesc::biased_lock_pattern); 2121 b(done, eq); 2122 } 2123 2124 2125 void MacroAssembler::resolve_jobject(Register value, 2126 Register tmp1, 2127 Register tmp2) { 2128 assert_different_registers(value, tmp1, tmp2); 2129 Label done, not_weak; 2130 cbz(value, done); // Use NULL as-is. 2131 STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u); 2132 tbz(value, 0, not_weak); // Test for jweak tag. 2133 2134 // Resolve jweak. 2135 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 2136 Address(value, -JNIHandles::weak_tag_value), value, tmp1, tmp2, noreg); 2137 b(done); 2138 bind(not_weak); 2139 // Resolve (untagged) jobject. 2140 access_load_at(T_OBJECT, IN_NATIVE, 2141 Address(value, 0), value, tmp1, tmp2, noreg); 2142 verify_oop(value); 2143 bind(done); 2144 } 2145 2146 2147 ////////////////////////////////////////////////////////////////////////////////// 2148 2149 #ifdef AARCH64 2150 2151 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2152 switch (size_in_bytes) { 2153 case 8: ldr(dst, src); break; 2154 case 4: is_signed ? ldr_s32(dst, src) : ldr_u32(dst, src); break; 2155 case 2: is_signed ? ldrsh(dst, src) : ldrh(dst, src); break; 2156 case 1: is_signed ? ldrsb(dst, src) : ldrb(dst, src); break; 2157 default: ShouldNotReachHere(); 2158 } 2159 } 2160 2161 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 2162 switch (size_in_bytes) { 2163 case 8: str(src, dst); break; 2164 case 4: str_32(src, dst); break; 2165 case 2: strh(src, dst); break; 2166 case 1: strb(src, dst); break; 2167 default: ShouldNotReachHere(); 2168 } 2169 } 2170 2171 #else 2172 2173 void MacroAssembler::load_sized_value(Register dst, Address src, 2174 size_t size_in_bytes, bool is_signed, AsmCondition cond) { 2175 switch (size_in_bytes) { 2176 case 4: ldr(dst, src, cond); break; 2177 case 2: is_signed ? ldrsh(dst, src, cond) : ldrh(dst, src, cond); break; 2178 case 1: is_signed ? 
ldrsb(dst, src, cond) : ldrb(dst, src, cond); break; 2179 default: ShouldNotReachHere(); 2180 } 2181 } 2182 2183 2184 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond) { 2185 switch (size_in_bytes) { 2186 case 4: str(src, dst, cond); break; 2187 case 2: strh(src, dst, cond); break; 2188 case 1: strb(src, dst, cond); break; 2189 default: ShouldNotReachHere(); 2190 } 2191 } 2192 #endif // AARCH64 2193 2194 // Look up the method for a megamorphic invokeinterface call. 2195 // The target method is determined by <Rinterf, Rindex>. 2196 // The receiver klass is in Rklass. 2197 // On success, the result will be in method_result, and execution falls through. 2198 // On failure, execution transfers to the given label. 2199 void MacroAssembler::lookup_interface_method(Register Rklass, 2200 Register Rintf, 2201 RegisterOrConstant itable_index, 2202 Register method_result, 2203 Register Rscan, 2204 Register Rtmp, 2205 Label& L_no_such_interface) { 2206 2207 assert_different_registers(Rklass, Rintf, Rscan, Rtmp); 2208 2209 const int entry_size = itableOffsetEntry::size() * HeapWordSize; 2210 assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience"); 2211 2212 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2213 const int base = in_bytes(Klass::vtable_start_offset()); 2214 const int scale = exact_log2(vtableEntry::size_in_bytes()); 2215 ldr_s32(Rtmp, Address(Rklass, Klass::vtable_length_offset())); // Get length of vtable 2216 add(Rscan, Rklass, base); 2217 add(Rscan, Rscan, AsmOperand(Rtmp, lsl, scale)); 2218 2219 // Search through the itable for an interface equal to incoming Rintf 2220 // itable looks like [intface][offset][intface][offset][intface][offset] 2221 2222 Label loop; 2223 bind(loop); 2224 ldr(Rtmp, Address(Rscan, entry_size, post_indexed)); 2225 #ifdef AARCH64 2226 Label found; 2227 cmp(Rtmp, Rintf); 2228 b(found, eq); 2229 cbnz(Rtmp, loop); 2230 #else 2231 cmp(Rtmp, Rintf); // set ZF and CF if interface is found 2232 cmn(Rtmp, 0, ne); // check if tmp == 0 and clear CF if it is 2233 b(loop, ne); 2234 #endif // AARCH64 2235 2236 #ifdef AARCH64 2237 b(L_no_such_interface); 2238 bind(found); 2239 #else 2240 // CF == 0 means we reached the end of itable without finding icklass 2241 b(L_no_such_interface, cc); 2242 #endif // !AARCH64 2243 2244 if (method_result != noreg) { 2245 // Interface found at previous position of Rscan, now load the method 2246 ldr_s32(Rtmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size)); 2247 if (itable_index.is_register()) { 2248 add(Rtmp, Rtmp, Rklass); // Add offset to Klass* 2249 assert(itableMethodEntry::size() * HeapWordSize == wordSize, "adjust the scaling in the code below"); 2250 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust the offset in the code below"); 2251 ldr(method_result, Address::indexed_ptr(Rtmp, itable_index.as_register())); 2252 } else { 2253 int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index.as_constant() + 2254 itableMethodEntry::method_offset_in_bytes(); 2255 add_slow(method_result, Rklass, method_offset); 2256 ldr(method_result, Address(method_result, Rtmp)); 2257 } 2258 } 2259 } 2260 2261 #ifdef COMPILER2 2262 // TODO: 8 bytes at a time? pre-fetch? 2263 // Compare char[] arrays aligned to 4 bytes. 
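// Rough strategy (see the code below): if 'limit' (the byte count) indicates an
// odd number of chars, compare the single trailing char first; then compare the
// rest one 32-bit word (two chars) at a time. The pointers are advanced past the
// end and indexed with a negative offset that counts up toward zero, so 'adds'
// sets the flags that terminate the loop.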
2264 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2, 2265 Register limit, Register result, 2266 Register chr1, Register chr2, Label& Ldone) { 2267 Label Lvector, Lloop; 2268 2269 // Note: limit contains number of bytes (2*char_elements) != 0. 2270 tst(limit, 0x2); // trailing character ? 2271 b(Lvector, eq); 2272 2273 // compare the trailing char 2274 sub(limit, limit, sizeof(jchar)); 2275 ldrh(chr1, Address(ary1, limit)); 2276 ldrh(chr2, Address(ary2, limit)); 2277 cmp(chr1, chr2); 2278 mov(result, 0, ne); // not equal 2279 b(Ldone, ne); 2280 2281 // only one char ? 2282 tst(limit, limit); 2283 mov(result, 1, eq); 2284 b(Ldone, eq); 2285 2286 // word by word compare, dont't need alignment check 2287 bind(Lvector); 2288 2289 // Shift ary1 and ary2 to the end of the arrays, negate limit 2290 add(ary1, limit, ary1); 2291 add(ary2, limit, ary2); 2292 neg(limit, limit); 2293 2294 bind(Lloop); 2295 ldr_u32(chr1, Address(ary1, limit)); 2296 ldr_u32(chr2, Address(ary2, limit)); 2297 cmp_32(chr1, chr2); 2298 mov(result, 0, ne); // not equal 2299 b(Ldone, ne); 2300 adds(limit, limit, 2*sizeof(jchar)); 2301 b(Lloop, ne); 2302 2303 // Caller should set it: 2304 // mov(result_reg, 1); //equal 2305 } 2306 #endif 2307 2308 void MacroAssembler::inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2) { 2309 mov_slow(tmpreg1, counter_addr); 2310 ldr_s32(tmpreg2, tmpreg1); 2311 add_32(tmpreg2, tmpreg2, 1); 2312 str_32(tmpreg2, tmpreg1); 2313 } 2314 2315 void MacroAssembler::floating_cmp(Register dst) { 2316 #ifdef AARCH64 2317 NOT_TESTED(); 2318 cset(dst, gt); // 1 if '>', else 0 2319 csinv(dst, dst, ZR, ge); // previous value if '>=', else -1 2320 #else 2321 vmrs(dst, FPSCR); 2322 orr(dst, dst, 0x08000000); 2323 eor(dst, dst, AsmOperand(dst, lsl, 3)); 2324 mov(dst, AsmOperand(dst, asr, 30)); 2325 #endif 2326 } 2327 2328 void MacroAssembler::restore_default_fp_mode() { 2329 #ifdef AARCH64 2330 msr(SysReg_FPCR, ZR); 2331 #else 2332 #ifndef __SOFTFP__ 2333 // Round to Near mode, IEEE compatible, masked exceptions 2334 mov(Rtemp, 0); 2335 vmsr(FPSCR, Rtemp); 2336 #endif // !__SOFTFP__ 2337 #endif // AARCH64 2338 } 2339 2340 #ifndef AARCH64 2341 // 24-bit word range == 26-bit byte range 2342 bool check26(int offset) { 2343 // this could be simplified, but it mimics encoding and decoding 2344 // an actual branch insrtuction 2345 int off1 = offset << 6 >> 8; 2346 int encoded = off1 & ((1<<24)-1); 2347 int decoded = encoded << 8 >> 6; 2348 return offset == decoded; 2349 } 2350 #endif // !AARCH64 2351 2352 // Perform some slight adjustments so the default 32MB code cache 2353 // is fully reachable. 2354 static inline address first_cache_address() { 2355 return CodeCache::low_bound() + sizeof(HeapBlock::Header); 2356 } 2357 static inline address last_cache_address() { 2358 return CodeCache::high_bound() - Assembler::InstructionSize; 2359 } 2360 2361 #ifdef AARCH64 2362 // Can we reach target using ADRP? 2363 bool MacroAssembler::page_reachable_from_cache(address target) { 2364 intptr_t cl = (intptr_t)first_cache_address() & ~0xfff; 2365 intptr_t ch = (intptr_t)last_cache_address() & ~0xfff; 2366 intptr_t addr = (intptr_t)target & ~0xfff; 2367 2368 intptr_t loffset = addr - cl; 2369 intptr_t hoffset = addr - ch; 2370 return is_imm_in_range(loffset >> 12, 21, 0) && is_imm_in_range(hoffset >> 12, 21, 0); 2371 } 2372 #endif 2373 2374 // Can we reach target using unconditional branch or call from anywhere 2375 // in the code cache (because code can be relocated)? 
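// On 32-bit ARM a B/BL instruction encodes a signed 24-bit word offset, i.e.
// roughly +/-32MB of byte range (hence check26 above); AArch64 branches have a
// 26-bit offset range. Both ends of the code cache are tested because the code
// emitting the branch may later be relocated anywhere within the cache.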
2376 bool MacroAssembler::_reachable_from_cache(address target) { 2377 #ifdef __thumb__ 2378 if ((1 & (intptr_t)target) != 0) { 2379 // Return false to avoid 'b' if we need switching to THUMB mode. 2380 return false; 2381 } 2382 #endif 2383 2384 address cl = first_cache_address(); 2385 address ch = last_cache_address(); 2386 2387 if (ForceUnreachable) { 2388 // Only addresses from CodeCache can be treated as reachable. 2389 if (target < CodeCache::low_bound() || CodeCache::high_bound() < target) { 2390 return false; 2391 } 2392 } 2393 2394 intptr_t loffset = (intptr_t)target - (intptr_t)cl; 2395 intptr_t hoffset = (intptr_t)target - (intptr_t)ch; 2396 2397 #ifdef AARCH64 2398 return is_offset_in_range(loffset, 26) && is_offset_in_range(hoffset, 26); 2399 #else 2400 return check26(loffset - 8) && check26(hoffset - 8); 2401 #endif 2402 } 2403 2404 bool MacroAssembler::reachable_from_cache(address target) { 2405 assert(CodeCache::contains(pc()), "not supported"); 2406 return _reachable_from_cache(target); 2407 } 2408 2409 // Can we reach the entire code cache from anywhere else in the code cache? 2410 bool MacroAssembler::_cache_fully_reachable() { 2411 address cl = first_cache_address(); 2412 address ch = last_cache_address(); 2413 return _reachable_from_cache(cl) && _reachable_from_cache(ch); 2414 } 2415 2416 bool MacroAssembler::cache_fully_reachable() { 2417 assert(CodeCache::contains(pc()), "not supported"); 2418 return _cache_fully_reachable(); 2419 } 2420 2421 void MacroAssembler::jump(address target, relocInfo::relocType rtype, Register scratch NOT_AARCH64_ARG(AsmCondition cond)) { 2422 assert((rtype == relocInfo::runtime_call_type) || (rtype == relocInfo::none), "not supported"); 2423 if (reachable_from_cache(target)) { 2424 relocate(rtype); 2425 b(target NOT_AARCH64_ARG(cond)); 2426 return; 2427 } 2428 2429 // Note: relocate is not needed for the code below, 2430 // encoding targets in absolute format. 2431 if (ignore_non_patchable_relocations()) { 2432 rtype = relocInfo::none; 2433 } 2434 2435 #ifdef AARCH64 2436 assert (scratch != noreg, "should be specified"); 2437 InlinedAddress address_literal(target, rtype); 2438 ldr_literal(scratch, address_literal); 2439 br(scratch); 2440 int off = offset(); 2441 bind_literal(address_literal); 2442 #ifdef COMPILER2 2443 if (offset() - off == wordSize) { 2444 // no padding, so insert nop for worst-case sizing 2445 nop(); 2446 } 2447 #endif 2448 #else 2449 if (VM_Version::supports_movw() && (scratch != noreg) && (rtype == relocInfo::none)) { 2450 // Note: this version cannot be (atomically) patched 2451 mov_slow(scratch, (intptr_t)target, cond); 2452 bx(scratch, cond); 2453 } else { 2454 Label skip; 2455 InlinedAddress address_literal(target); 2456 if (cond != al) { 2457 b(skip, inverse(cond)); 2458 } 2459 relocate(rtype); 2460 ldr_literal(PC, address_literal); 2461 bind_literal(address_literal); 2462 bind(skip); 2463 } 2464 #endif // AARCH64 2465 } 2466 2467 // Similar to jump except that: 2468 // - near calls are valid only if any destination in the cache is near 2469 // - no movt/movw (not atomically patchable) 2470 void MacroAssembler::patchable_jump(address target, relocInfo::relocType rtype, Register scratch NOT_AARCH64_ARG(AsmCondition cond)) { 2471 assert((rtype == relocInfo::runtime_call_type) || (rtype == relocInfo::none), "not supported"); 2472 if (cache_fully_reachable()) { 2473 // Note: this assumes that all possible targets (the initial one 2474 // and the addressed patched to) are all in the code cache. 
2475 assert(CodeCache::contains(target), "target might be too far"); 2476 relocate(rtype); 2477 b(target NOT_AARCH64_ARG(cond)); 2478 return; 2479 } 2480 2481 // Discard the relocation information if not needed for CacheCompiledCode 2482 // since the next encodings are all in absolute format. 2483 if (ignore_non_patchable_relocations()) { 2484 rtype = relocInfo::none; 2485 } 2486 2487 #ifdef AARCH64 2488 assert (scratch != noreg, "should be specified"); 2489 InlinedAddress address_literal(target); 2490 relocate(rtype); 2491 ldr_literal(scratch, address_literal); 2492 br(scratch); 2493 int off = offset(); 2494 bind_literal(address_literal); 2495 #ifdef COMPILER2 2496 if (offset() - off == wordSize) { 2497 // no padding, so insert nop for worst-case sizing 2498 nop(); 2499 } 2500 #endif 2501 #else 2502 { 2503 Label skip; 2504 InlinedAddress address_literal(target); 2505 if (cond != al) { 2506 b(skip, inverse(cond)); 2507 } 2508 relocate(rtype); 2509 ldr_literal(PC, address_literal); 2510 bind_literal(address_literal); 2511 bind(skip); 2512 } 2513 #endif // AARCH64 2514 } 2515 2516 void MacroAssembler::call(address target, RelocationHolder rspec NOT_AARCH64_ARG(AsmCondition cond)) { 2517 Register scratch = LR; 2518 assert(rspec.type() == relocInfo::runtime_call_type || rspec.type() == relocInfo::none, "not supported"); 2519 if (reachable_from_cache(target)) { 2520 relocate(rspec); 2521 bl(target NOT_AARCH64_ARG(cond)); 2522 return; 2523 } 2524 2525 // Note: relocate is not needed for the code below, 2526 // encoding targets in absolute format. 2527 if (ignore_non_patchable_relocations()) { 2528 // This assumes the information was needed only for relocating the code. 2529 rspec = RelocationHolder::none; 2530 } 2531 2532 #ifndef AARCH64 2533 if (VM_Version::supports_movw() && (rspec.type() == relocInfo::none)) { 2534 // Note: this version cannot be (atomically) patched 2535 mov_slow(scratch, (intptr_t)target, cond); 2536 blx(scratch, cond); 2537 return; 2538 } 2539 #endif 2540 2541 { 2542 Label ret_addr; 2543 #ifndef AARCH64 2544 if (cond != al) { 2545 b(ret_addr, inverse(cond)); 2546 } 2547 #endif 2548 2549 2550 #ifdef AARCH64 2551 // TODO-AARCH64: make more optimal implementation 2552 // [ Keep in sync with MacroAssembler::call_size ] 2553 assert(rspec.type() == relocInfo::none, "call reloc not implemented"); 2554 mov_slow(scratch, target); 2555 blr(scratch); 2556 #else 2557 InlinedAddress address_literal(target); 2558 relocate(rspec); 2559 adr(LR, ret_addr); 2560 ldr_literal(PC, address_literal); 2561 2562 bind_literal(address_literal); 2563 bind(ret_addr); 2564 #endif 2565 } 2566 } 2567 2568 #if defined(AARCH64) && defined(COMPILER2) 2569 int MacroAssembler::call_size(address target, bool far, bool patchable) { 2570 // FIXME: mov_slow is variable-length 2571 if (!far) return 1; // bl 2572 if (patchable) return 2; // ldr; blr 2573 return instr_count_for_mov_slow((intptr_t)target) + 1; 2574 } 2575 #endif 2576 2577 int MacroAssembler::patchable_call(address target, RelocationHolder const& rspec, bool c2) { 2578 assert(rspec.type() == relocInfo::static_call_type || 2579 rspec.type() == relocInfo::none || 2580 rspec.type() == relocInfo::opt_virtual_call_type, "not supported"); 2581 2582 // Always generate the relocation information, needed for patching 2583 relocate(rspec); // used by NativeCall::is_call_before() 2584 if (cache_fully_reachable()) { 2585 // Note: this assumes that all possible targets (the initial one 2586 // and the addresses patched to) are all in the code cache. 
2587 assert(CodeCache::contains(target), "target might be too far"); 2588 bl(target); 2589 } else { 2590 #if defined(AARCH64) && defined(COMPILER2) 2591 if (c2) { 2592 // return address needs to match call_size(). 2593 // no need to trash Rtemp 2594 int off = offset(); 2595 Label skip_literal; 2596 InlinedAddress address_literal(target); 2597 ldr_literal(LR, address_literal); 2598 blr(LR); 2599 int ret_addr_offset = offset(); 2600 assert(offset() - off == call_size(target, true, true) * InstructionSize, "need to fix call_size()"); 2601 b(skip_literal); 2602 int off2 = offset(); 2603 bind_literal(address_literal); 2604 if (offset() - off2 == wordSize) { 2605 // no padding, so insert nop for worst-case sizing 2606 nop(); 2607 } 2608 bind(skip_literal); 2609 return ret_addr_offset; 2610 } 2611 #endif 2612 Label ret_addr; 2613 InlinedAddress address_literal(target); 2614 #ifdef AARCH64 2615 ldr_literal(Rtemp, address_literal); 2616 adr(LR, ret_addr); 2617 br(Rtemp); 2618 #else 2619 adr(LR, ret_addr); 2620 ldr_literal(PC, address_literal); 2621 #endif 2622 bind_literal(address_literal); 2623 bind(ret_addr); 2624 } 2625 return offset(); 2626 } 2627 2628 // ((OopHandle)result).resolve(); 2629 void MacroAssembler::resolve_oop_handle(Register result) { 2630 // OopHandle::resolve is an indirection. 2631 ldr(result, Address(result, 0)); 2632 } 2633 2634 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 2635 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2636 ldr(tmp, Address(method, Method::const_offset())); 2637 ldr(tmp, Address(tmp, ConstMethod::constants_offset())); 2638 ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset_in_bytes())); 2639 ldr(mirror, Address(tmp, mirror_offset)); 2640 resolve_oop_handle(mirror); 2641 } 2642 2643 2644 /////////////////////////////////////////////////////////////////////////////// 2645 2646 // Compressed pointers 2647 2648 #ifdef AARCH64 2649 2650 void MacroAssembler::load_klass(Register dst_klass, Register src_oop) { 2651 if (UseCompressedClassPointers) { 2652 ldr_w(dst_klass, Address(src_oop, oopDesc::klass_offset_in_bytes())); 2653 decode_klass_not_null(dst_klass); 2654 } else { 2655 ldr(dst_klass, Address(src_oop, oopDesc::klass_offset_in_bytes())); 2656 } 2657 } 2658 2659 #else 2660 2661 void MacroAssembler::load_klass(Register dst_klass, Register src_oop, AsmCondition cond) { 2662 ldr(dst_klass, Address(src_oop, oopDesc::klass_offset_in_bytes()), cond); 2663 } 2664 2665 #endif // AARCH64 2666 2667 // Blows src_klass. 2668 void MacroAssembler::store_klass(Register src_klass, Register dst_oop) { 2669 #ifdef AARCH64 2670 if (UseCompressedClassPointers) { 2671 assert(src_klass != dst_oop, "not enough registers"); 2672 encode_klass_not_null(src_klass); 2673 str_w(src_klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 2674 return; 2675 } 2676 #endif // AARCH64 2677 str(src_klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 2678 } 2679 2680 #ifdef AARCH64 2681 2682 void MacroAssembler::store_klass_gap(Register dst) { 2683 if (UseCompressedClassPointers) { 2684 str_w(ZR, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 2685 } 2686 } 2687 2688 #endif // AARCH64 2689 2690 2691 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) { 2692 access_load_at(T_OBJECT, IN_HEAP | decorators, src, dst, tmp1, tmp2, tmp3); 2693 } 2694 2695 // Blows src and flags. 
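// All heap oop loads/stores funnel through access_load_at/access_store_at
// below, which dispatch to the active GC's BarrierSetAssembler so the proper
// pre/post barriers (or none, with AS_RAW) are emitted.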
void MacroAssembler::store_heap_oop(Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, obj, new_val, tmp1, tmp2, tmp3, false);
}

void MacroAssembler::store_heap_oop_null(Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP, obj, new_val, tmp1, tmp2, tmp3, true);
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                    Address src, Register dst, Register tmp1, Register tmp2, Register tmp3) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
  }
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
                                     Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
  } else {
    bs->store_at(this, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
  }
}


#ifdef AARCH64

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register dst, Register src) {
  // This code pattern is matched in NativeInstruction::skip_encode_heap_oop.
  // Update it when modifying this code.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      _lsr(dst, src, Universe::narrow_oop_shift());
    } else if (dst != src) {
      mov(dst, src);
    }
  } else {
    tst(src, src);
    csel(dst, Rheap_base, src, eq);
    sub(dst, dst, Rheap_base);
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      _lsr(dst, dst, Universe::narrow_oop_shift());
    }
  }
}

// Same algorithm as oop.inline.hpp decode_heap_oop.
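// In C-like terms the decode performed below is roughly (illustrative sketch):
//   oop decode(narrowOop v) {
//     if (narrow_oop_base == NULL) return (oop)((uintptr_t)v << shift);
//     return (v == 0) ? NULL : (oop)(narrow_oop_base + ((uintptr_t)v << shift));
//   }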
2760 void MacroAssembler::decode_heap_oop(Register dst, Register src) { 2761 #ifdef ASSERT 2762 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 2763 #endif 2764 assert(Universe::narrow_oop_shift() == 0 || LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 2765 if (Universe::narrow_oop_base() != NULL) { 2766 tst(src, src); 2767 add(dst, Rheap_base, AsmOperand(src, lsl, Universe::narrow_oop_shift())); 2768 csel(dst, dst, ZR, ne); 2769 } else { 2770 _lsl(dst, src, Universe::narrow_oop_shift()); 2771 } 2772 verify_oop(dst); 2773 } 2774 2775 #ifdef COMPILER2 2776 // Algorithm must match oop.inline.hpp encode_heap_oop. 2777 // Must preserve condition codes, or C2 encodeHeapOop_not_null rule 2778 // must be changed. 2779 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 2780 assert (UseCompressedOops, "must be compressed"); 2781 assert (Universe::heap() != NULL, "java heap should be initialized"); 2782 #ifdef ASSERT 2783 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 2784 #endif 2785 verify_oop(src); 2786 if (Universe::narrow_oop_base() == NULL) { 2787 if (Universe::narrow_oop_shift() != 0) { 2788 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 2789 _lsr(dst, src, Universe::narrow_oop_shift()); 2790 } else if (dst != src) { 2791 mov(dst, src); 2792 } 2793 } else { 2794 sub(dst, src, Rheap_base); 2795 if (Universe::narrow_oop_shift() != 0) { 2796 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 2797 _lsr(dst, dst, Universe::narrow_oop_shift()); 2798 } 2799 } 2800 } 2801 2802 // Same algorithm as oops.inline.hpp decode_heap_oop. 2803 // Must preserve condition codes, or C2 decodeHeapOop_not_null rule 2804 // must be changed. 2805 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 2806 #ifdef ASSERT 2807 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 2808 #endif 2809 assert(Universe::narrow_oop_shift() == 0 || LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 2810 if (Universe::narrow_oop_base() != NULL) { 2811 add(dst, Rheap_base, AsmOperand(src, lsl, Universe::narrow_oop_shift())); 2812 } else { 2813 _lsl(dst, src, Universe::narrow_oop_shift()); 2814 } 2815 verify_oop(dst); 2816 } 2817 2818 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 2819 assert(UseCompressedClassPointers, "should only be used for compressed header"); 2820 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 2821 int klass_index = oop_recorder()->find_index(k); 2822 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 2823 2824 // Relocation with special format (see relocInfo_arm.hpp). 2825 relocate(rspec); 2826 narrowKlass encoded_k = Klass::encode_klass(k); 2827 movz(dst, encoded_k & 0xffff, 0); 2828 movk(dst, (encoded_k >> 16) & 0xffff, 16); 2829 } 2830 2831 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 2832 assert(UseCompressedOops, "should only be used for compressed header"); 2833 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 2834 int oop_index = oop_recorder()->find_index(obj); 2835 RelocationHolder rspec = oop_Relocation::spec(oop_index); 2836 2837 relocate(rspec); 2838 movz(dst, 0xffff, 0); 2839 movk(dst, 0xffff, 16); 2840 } 2841 2842 #endif // COMPILER2 2843 // Must preserve condition codes, or C2 encodeKlass_not_null rule 2844 // must be changed. 
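// Klass encoding mirrors the narrow-oop case: subtract narrow_klass_base (when
// one is set), then shift right by narrow_klass_shift; Rheap_base is borrowed
// as a scratch register and restored afterwards via reinit_heapbase().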
2845 void MacroAssembler::encode_klass_not_null(Register r) { 2846 if (Universe::narrow_klass_base() != NULL) { 2847 // Use Rheap_base as a scratch register in which to temporarily load the narrow_klass_base. 2848 assert(r != Rheap_base, "Encoding a klass in Rheap_base"); 2849 mov_slow(Rheap_base, Universe::narrow_klass_base()); 2850 sub(r, r, Rheap_base); 2851 } 2852 if (Universe::narrow_klass_shift() != 0) { 2853 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 2854 _lsr(r, r, Universe::narrow_klass_shift()); 2855 } 2856 if (Universe::narrow_klass_base() != NULL) { 2857 reinit_heapbase(); 2858 } 2859 } 2860 2861 // Must preserve condition codes, or C2 encodeKlass_not_null rule 2862 // must be changed. 2863 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 2864 if (dst == src) { 2865 encode_klass_not_null(src); 2866 return; 2867 } 2868 if (Universe::narrow_klass_base() != NULL) { 2869 mov_slow(dst, (int64_t)Universe::narrow_klass_base()); 2870 sub(dst, src, dst); 2871 if (Universe::narrow_klass_shift() != 0) { 2872 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 2873 _lsr(dst, dst, Universe::narrow_klass_shift()); 2874 } 2875 } else { 2876 if (Universe::narrow_klass_shift() != 0) { 2877 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 2878 _lsr(dst, src, Universe::narrow_klass_shift()); 2879 } else { 2880 mov(dst, src); 2881 } 2882 } 2883 } 2884 2885 // Function instr_count_for_decode_klass_not_null() counts the instructions 2886 // generated by decode_klass_not_null(register r) and reinit_heapbase(), 2887 // when (Universe::heap() != NULL). Hence, if the instructions they 2888 // generate change, then this method needs to be updated. 2889 int MacroAssembler::instr_count_for_decode_klass_not_null() { 2890 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 2891 assert(Universe::heap() != NULL, "java heap should be initialized"); 2892 if (Universe::narrow_klass_base() != NULL) { 2893 return instr_count_for_mov_slow(Universe::narrow_klass_base()) + // mov_slow 2894 1 + // add 2895 instr_count_for_mov_slow(Universe::narrow_ptrs_base()); // reinit_heapbase() = mov_slow 2896 } else { 2897 if (Universe::narrow_klass_shift() != 0) { 2898 return 1; 2899 } 2900 } 2901 return 0; 2902 } 2903 2904 // Must preserve condition codes, or C2 decodeKlass_not_null rule 2905 // must be changed. 2906 void MacroAssembler::decode_klass_not_null(Register r) { 2907 int off = offset(); 2908 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 2909 assert(Universe::heap() != NULL, "java heap should be initialized"); 2910 assert(r != Rheap_base, "Decoding a klass in Rheap_base"); 2911 // Cannot assert, instr_count_for_decode_klass_not_null() counts instructions. 2912 // Also do not verify_oop as this is called by verify_oop. 2913 if (Universe::narrow_klass_base() != NULL) { 2914 // Use Rheap_base as a scratch register in which to temporarily load the narrow_klass_base. 
    mov_slow(Rheap_base, Universe::narrow_klass_base());
    add(r, Rheap_base, AsmOperand(r, lsl, Universe::narrow_klass_shift()));
    reinit_heapbase();
  } else {
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      _lsl(r, r, Universe::narrow_klass_shift());
    }
  }
  assert((offset() - off) == (instr_count_for_decode_klass_not_null() * InstructionSize), "need to fix instr_count_for_decode_klass_not_null");
}

// Must preserve condition codes, or C2 decodeKlass_not_null rule
// must be changed.
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  if (src == dst) {
    decode_klass_not_null(src);
    return;
  }

  assert(UseCompressedClassPointers, "should only be used for compressed headers");
  assert(Universe::heap() != NULL, "java heap should be initialized");
  assert(src != Rheap_base, "Decoding a klass in Rheap_base");
  assert(dst != Rheap_base, "Decoding a klass into Rheap_base");
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_klass_base() != NULL) {
    mov_slow(dst, Universe::narrow_klass_base());
    add(dst, dst, AsmOperand(src, lsl, Universe::narrow_klass_shift()));
  } else {
    _lsl(dst, src, Universe::narrow_klass_shift());
  }
}


void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      mov_slow(Rheap_base, Universe::narrow_ptrs_base());
    } else {
      ldr_global_ptr(Rheap_base, (address)Universe::narrow_ptrs_base_addr());
    }
  }
}

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
  // This code pattern is matched in NativeInstruction::skip_verify_heapbase.
  // Update it when modifying this code.
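  // The check below compares Rheap_base against the expected narrow_ptrs_base
  // while preserving the caller's flags: NZCV is saved to the stack around the
  // cmp and restored before returning.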
2963 assert (UseCompressedOops, "should be compressed"); 2964 assert (Universe::heap() != NULL, "java heap should be initialized"); 2965 if (CheckCompressedOops) { 2966 Label ok; 2967 str(Rthread, Address(Rthread, in_bytes(JavaThread::in_top_frame_unsafe_section_offset()))); 2968 raw_push(Rtemp, ZR); 2969 mrs(Rtemp, Assembler::SysReg_NZCV); 2970 str(Rtemp, Address(SP, 1 * wordSize)); 2971 mov_slow(Rtemp, Universe::narrow_ptrs_base()); 2972 cmp(Rheap_base, Rtemp); 2973 b(ok, eq); 2974 stop(msg); 2975 bind(ok); 2976 ldr(Rtemp, Address(SP, 1 * wordSize)); 2977 msr(Assembler::SysReg_NZCV, Rtemp); 2978 raw_pop(Rtemp, ZR); 2979 str(ZR, Address(Rthread, in_bytes(JavaThread::in_top_frame_unsafe_section_offset()))); 2980 } 2981 } 2982 #endif // ASSERT 2983 2984 #endif // AARCH64 2985 2986 #ifdef COMPILER2 2987 void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2 AARCH64_ONLY_ARG(Register Rscratch3)) 2988 { 2989 assert(VM_Version::supports_ldrex(), "unsupported, yet?"); 2990 2991 Register Rmark = Rscratch2; 2992 2993 assert(Roop != Rscratch, ""); 2994 assert(Roop != Rmark, ""); 2995 assert(Rbox != Rscratch, ""); 2996 assert(Rbox != Rmark, ""); 2997 2998 Label fast_lock, done; 2999 3000 if (UseBiasedLocking && !UseOptoBiasInlining) { 3001 Label failed; 3002 #ifdef AARCH64 3003 biased_locking_enter(Roop, Rmark, Rscratch, false, Rscratch3, done, failed); 3004 #else 3005 biased_locking_enter(Roop, Rmark, Rscratch, false, noreg, done, failed); 3006 #endif 3007 bind(failed); 3008 } 3009 3010 ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes())); 3011 tst(Rmark, markOopDesc::unlocked_value); 3012 b(fast_lock, ne); 3013 3014 // Check for recursive lock 3015 // See comments in InterpreterMacroAssembler::lock_object for 3016 // explanations on the fast recursive locking check. 
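  // In short (as implemented below): the lock is recursive if the mark word's
  // low two lock bits are 00 and (mark - SP) fits within one page, i.e. the
  // displaced header points into the current thread's own stack; in that case
  // zero is stored as the displaced header in the BasicLock.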
3017 #ifdef AARCH64 3018 intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size()); 3019 Assembler::LogicalImmediate imm(mask, false); 3020 mov(Rscratch, SP); 3021 sub(Rscratch, Rmark, Rscratch); 3022 ands(Rscratch, Rscratch, imm); 3023 // set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8153107) 3024 str(Rscratch, Address(Rbox, BasicLock::displaced_header_offset_in_bytes())); 3025 b(done); 3026 3027 #else 3028 // -1- test low 2 bits 3029 movs(Rscratch, AsmOperand(Rmark, lsl, 30)); 3030 // -2- test (hdr - SP) if the low two bits are 0 3031 sub(Rscratch, Rmark, SP, eq); 3032 movs(Rscratch, AsmOperand(Rscratch, lsr, exact_log2(os::vm_page_size())), eq); 3033 // If still 'eq' then recursive locking OK 3034 // set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8153107) 3035 str(Rscratch, Address(Rbox, BasicLock::displaced_header_offset_in_bytes())); 3036 b(done); 3037 #endif 3038 3039 bind(fast_lock); 3040 str(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes())); 3041 3042 bool allow_fallthrough_on_failure = true; 3043 bool one_shot = true; 3044 cas_for_lock_acquire(Rmark, Rbox, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot); 3045 3046 bind(done); 3047 3048 } 3049 3050 void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2 AARCH64_ONLY_ARG(Register Rscratch3)) 3051 { 3052 assert(VM_Version::supports_ldrex(), "unsupported, yet?"); 3053 3054 Register Rmark = Rscratch2; 3055 3056 assert(Roop != Rscratch, ""); 3057 assert(Roop != Rmark, ""); 3058 assert(Rbox != Rscratch, ""); 3059 assert(Rbox != Rmark, ""); 3060 3061 Label done; 3062 3063 if (UseBiasedLocking && !UseOptoBiasInlining) { 3064 biased_locking_exit(Roop, Rscratch, done); 3065 } 3066 3067 ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes())); 3068 // If hdr is NULL, we've got recursive locking and there's nothing more to do 3069 cmp(Rmark, 0); 3070 b(done, eq); 3071 3072 // Restore the object header 3073 bool allow_fallthrough_on_failure = true; 3074 bool one_shot = true; 3075 cas_for_lock_release(Rmark, Rbox, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot); 3076 3077 bind(done); 3078 3079 } 3080 #endif // COMPILER2