/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"

#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
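// For example (illustrative only): an unconditional "b <target>" is
// repaired by rewriting its imm26 field with (target - branch) >> 2,
// while an adrp-based sequence may also need its trailing ldr/str, add
// or movk patched, which is why the returned length can cover more
// than one instruction.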
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target-branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 4 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 3 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str, add or movk instruction. Otherwise we could accidentally end
      // up treating a type 4 relocation as a type 1, 2 or 3 just because it happened
      // to be followed by a random unrelated ldr/str, add or movk instruction.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // movk #imm16<<32
        Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
        long dest = ((long)target & 0xffffffffL) | ((long)branch & 0xffff00000000L);
        long pc_page = (long)branch >> 12;
        long adr_page = (long)dest >> 12;
        offset = adr_page - pc_page;
        instructions = 2;
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
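  // For example (illustrative only): a narrow OOP value 0x12345678 ends
  // up encoded as "movz <r>, #0x1234, lsl #16" followed by
  // "movk <r>, #0x5678".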
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //       movk    Rx, #imm16<<32
      //   4 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
            Instruction_aarch64::extract(insn, 4, 0) ==
            Instruction_aarch64::extract(insn2, 4, 0)) {
          target_page = (target_page & 0xffffffff) |
                        ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
        }
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}


void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp & esp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
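//
// Schematically (see the overloads below): last_Java_pc is written into
// the frame anchor first, then last_Java_sp; last_Java_fp is only stored
// when a valid register is supplied.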
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct. We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
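    // Illustrative: ADRP materializes the 4K-aligned page of the target
    // and leaves the low 12 bits in 'offset' for the ADD below, so the
    // pair can reach any address within +/-4G of the PC.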
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1, rscratch2);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
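  // Schematically: tmp_reg still holds mark ^ (prototype | thread) with
  // the age bits masked out, so a nonzero epoch field in that xor means
  // the bias epoch has expired and a rebias attempt is legitimate.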
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go into the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1, rscratch2);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1, rscratch2);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.

address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
  if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
    address stub = emit_trampoline_stub(start_offset, entry.target());
    if (stub == NULL) {
      return NULL; // CodeCache is full
    }
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (!far_branches()) {
    bl(entry.target());
  } else {
    bl(pc());
  }
  // just need to return a non-null address
  return pc();
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
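  // The stub emitted below is, schematically:
  //   ldr  rscratch1, <here + 8>   // 64-bit target loaded pc-relative
  //   br   rscratch1
  //   .quad <dest>                 // at NativeCallTrampolineStub::data_offset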
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub;
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }


RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  ldr(tmp, ExternalAddress((address) delayed_value_addr));

  if (offset != 0)
    add(tmp, tmp, offset);

  return RegisterOrConstant(tmp);
}


void MacroAssembler::notify(int type) {
  if (type == bytecode_start) {
    // set_last_Java_frame(esp, rfp, (address)NULL);
    Assembler::notify(type);
    // reset_last_Java_frame(true);
  }
  else
    Assembler::notify(type);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
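  // Schematically: recv_klass is advanced by itable_index * wordSize +
  // itentry_off below, so the final load only needs the per-interface
  // offset discovered during the scan.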
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
  if (itentry_off)
    add(recv_klass, recv_klass, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  ldr(method_result, Address(recv_klass, scan_temp));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = in_bytes(Klass::vtable_start_offset());
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result, Address(recv_klass, vtable_offset_in_bytes));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
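    // (With a constant super_check_offset that cannot alias the cache,
    // EQ here is a definitive hit and NE a definitive miss.)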
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  // Get super_klass value into r0 (even if it was in r5 or r2).
  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop_addr");
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
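  // Schematically, the address returned below is
  //   esp + arg_slot * stackElementSize + expr_offset_in_bytes(extra_slot_offset),
  // computed through a scratch register when arg_slot is not a constant.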
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr);
}

void MacroAssembler::call_VM_leaf_base1(address entry_point,
                                        int number_of_gp_arguments,
                                        int number_of_fp_arguments,
                                        ret_type type,
                                        Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  // We add 1 to number_of_arguments because the thread in arg0 is
  // not counted
  mov(rscratch1, entry_point);
  blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
  maybe_isb();
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert(arg_0 != c_rarg3, "smashed arg");
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    // NOTE: this is plenty to provoke a segv

    if (ShenandoahVerifyReadsToFromSpace) {
      oopDesc::bs()->interpreter_read_barrier(this, reg);
    }

    ldr(zr, Address(reg));
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// MacroAssembler protected routines needed to implement
// public methods

void MacroAssembler::mov(Register r, Address dest) {
  code_section()->relocate(pc(), dest.rspec());
  u_int64_t imm64 = (u_int64_t)dest.target();
  movptr(r, imm64);
}

// Move a constant pointer into r. In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
}

// Macro to mov replicated immediate to vector register.
//  Vd will get the following values for different arrangements in T
//   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
//   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
//   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
//   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
//   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
//   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
//   T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
  assert(T != T1D && T != T2D, "invalid arrangement");
  if (T == T8B || T == T16B) {
    assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
    movi(Vd, T, imm32 & 0xff, 0);
    return;
  }
  u_int32_t nimm32 = ~imm32;
  if (T == T4H || T == T8H) {
    assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
    imm32 &= 0xffff;
    nimm32 &= 0xffff;
  }
  u_int32_t x = imm32;
  int movi_cnt = 0;
  int movn_cnt = 0;
  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
  x = nimm32;
  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
  if (movn_cnt < movi_cnt) imm32 = nimm32;
  unsigned lsl = 0;
  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
  if (movn_cnt < movi_cnt)
    mvni(Vd, T, imm32 & 0xff, lsl);
  else
    movi(Vd, T, imm32 & 0xff, lsl);
  imm32 >>= 8; lsl += 8;
  while (imm32) {
    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
    if (movn_cnt < movi_cnt)
      bici(Vd, T, imm32 & 0xff, lsl);
    else
      orri(Vd, T, imm32 & 0xff, lsl);
    lsl += 8; imm32 >>= 8;
  }
}
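// Worked example for the vector mov above (illustrative only):
// imm32 == 0x00ff0000 with T4S has one nonzero byte, while its
// complement 0xff00ffff has three, so the MOVI path wins and a single
// "movi Vd.4s, #0xff, lsl #16" is emitted.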
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(false, imm64)) {
    orr(dst, zr, imm64);
  } else {
    // we can use a combination of MOVZ or MOVN with
    // MOVK to build up the constant
    u_int64_t imm_h[4];
    int zero_count = 0;
    int neg_count = 0;
    int i;
    for (i = 0; i < 4; i++) {
      imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
      if (imm_h[i] == 0) {
        zero_count++;
      } else if (imm_h[i] == 0xffffL) {
        neg_count++;
      }
    }
    if (zero_count == 4) {
      // one MOVZ will do
      movz(dst, 0);
    } else if (neg_count == 4) {
      // one MOVN will do
      movn(dst, 0);
    } else if (zero_count == 3) {
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          break;
        }
      }
    } else if (neg_count == 3) {
      // one MOVN will do
      for (int i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          break;
        }
      }
    } else if (zero_count == 2) {
      // one MOVZ and one MOVK will do
      for (i = 0; i < 3; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 2) {
      // one MOVN and one MOVK will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (zero_count == 1) {
      // one MOVZ and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0x0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 1) {
      // one MOVN and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else {
      // use a MOVZ and 3 MOVKs (makes it easier to debug)
      movz(dst, (u_int32_t)imm_h[0], 0);
      for (i = 1; i < 4; i++) {
        movk(dst, (u_int32_t)imm_h[i], (i << 4));
      }
    }
  }
}

void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(true, imm32)) {
    orrw(dst, zr, imm32);
  } else {
    // we can use MOVZ, MOVN or two calls to MOVK to build up the
    // constant
    u_int32_t imm_h[2];
    imm_h[0] = imm32 & 0xffff;
    imm_h[1] = ((imm32 >> 16) & 0xffff);
    if (imm_h[0] == 0) {
      movzw(dst, imm_h[1], 16);
    } else if (imm_h[0] == 0xffff) {
      movnw(dst, imm_h[1] ^ 0xffff, 16);
    } else if (imm_h[1] == 0) {
      movzw(dst, imm_h[0], 0);
    } else if (imm_h[1] == 0xffff) {
      movnw(dst, imm_h[0] ^ 0xffff, 0);
    } else {
      // use a MOVZ and MOVK (makes it easier to debug)
      movzw(dst, imm_h[0], 0);
      movkw(dst, imm_h[1], 16);
    }
  }
}
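// Worked example for mov_immediate32 above (illustrative only):
// imm32 == 0xffff1234 has imm_h[1] == 0xffff, so a single
// "movnw dst, #0xedcb" suffices, since ~0x0000edcb == 0xffff1234.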
void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(true, imm32)) {
    orrw(dst, zr, imm32);
  } else {
    // we can use MOVZ, MOVN or two calls to MOVK to build up the
    // constant
    u_int32_t imm_h[2];
    imm_h[0] = imm32 & 0xffff;
    imm_h[1] = ((imm32 >> 16) & 0xffff);
    if (imm_h[0] == 0) {
      movzw(dst, imm_h[1], 16);
    } else if (imm_h[0] == 0xffff) {
      movnw(dst, imm_h[1] ^ 0xffff, 16);
    } else if (imm_h[1] == 0) {
      movzw(dst, imm_h[0], 0);
    } else if (imm_h[1] == 0xffff) {
      movnw(dst, imm_h[0] ^ 0xffff, 0);
    } else {
      // use a MOVZ and MOVK (makes it easier to debug)
      movzw(dst, imm_h[0], 0);
      movkw(dst, imm_h[1], 16);
    }
  }
}

// Form an address from base + offset in Rd.  Rd may or may
// not actually be used: you must use the Address that is returned.
// It is up to you to ensure that the shift provided matches the size
// of your data.
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
  if (Address::offset_ok_for_immed(byte_offset, shift))
    // It fits; no need for any heroics
    return Address(base, byte_offset);

  // Don't do anything clever with negative or misaligned offsets
  unsigned mask = (1 << shift) - 1;
  if (byte_offset < 0 || byte_offset & mask) {
    mov(Rd, byte_offset);
    add(Rd, base, Rd);
    return Address(Rd);
  }

  // See if we can do this with two 12-bit offsets
  {
    unsigned long word_offset = byte_offset >> shift;
    unsigned long masked_offset = word_offset & 0xfff000;
    if (Address::offset_ok_for_immed(word_offset - masked_offset)
        && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
      add(Rd, base, masked_offset << shift);
      word_offset -= masked_offset;
      return Address(Rd, word_offset << shift);
    }
  }

  // Do it the hard way
  mov(Rd, byte_offset);
  add(Rd, base, Rd);
  return Address(Rd);
}

void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
  if (UseLSE) {
    mov(tmp, 1);
    ldadd(Assembler::word, tmp, zr, counter_addr);
    return;
  }
  Label retry_load;
  if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
    prfm(Address(counter_addr), PSTL1STRM);
  bind(retry_load);
  // flush and load exclusive from the memory location
  ldxrw(tmp, counter_addr);
  addw(tmp, tmp, 1);
  // if we store+flush with no intervening write tmp will be zero
  stxrw(tmp2, tmp, counter_addr);
  cbnzw(tmp2, retry_load);
}


int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java idiv and irem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivl_offset = offset();
  if (! want_remainder) {
    sdivw(result, ra, rb);
  } else {
    sdivw(scratch, ra, rb);
    Assembler::msubw(result, scratch, rb, ra);
  }

  return idivl_offset;
}
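// Illustrative sketch (kept out of the build): the remainder path above
// relies on the identity computed by msubw,
// rem = dividend - (dividend / divisor) * divisor, which matches Java's
// irem semantics because both C++ and Java integer division truncate
// toward zero. Divide-by-zero is ignored here; the runtime handles it
// separately via implicit exceptions.
#if 0
static int java_irem(int ra, int rb) {
  int q = ra / rb;     // sdivw scratch, ra, rb
  return ra - q * rb;  // msubw result, scratch, rb, ra
}
#endif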
int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java ldiv and lrem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivq_offset = offset();
  if (! want_remainder) {
    sdiv(result, ra, rb);
  } else {
    sdiv(scratch, ra, rb);
    Assembler::msub(result, scratch, rb, ra);
  }

  return idivq_offset;
}

void MacroAssembler::membar(Membar_mask_bits order_constraint) {
  address prev = pc() - NativeMembar::instruction_size;
  if (prev == code()->last_membar()) {
    NativeMembar *bar = NativeMembar_at(prev);
    // We are merging two memory barrier instructions.  On AArch64 we
    // can do this simply by ORing them together.
    bar->set_kind(bar->get_kind() | order_constraint);
    BLOCK_COMMENT("merged membar");
  } else {
    code()->set_last_membar(pc());
    dmb(Assembler::barrier(order_constraint));
  }
}
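// Illustrative note (kept out of the build): merging two back-to-back
// barriers by ORing their kind bits is sound because a single DMB with the
// union of two constraint sets is at least as strong as issuing both
// barriers in sequence. The helper name is hypothetical.
#if 0
static unsigned merged_membar_kind(unsigned prev_kind, unsigned new_kind) {
  return prev_kind | new_kind;  // bar->set_kind(bar->get_kind() | order_constraint)
}
#endif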
// MacroAssembler routines found actually to be needed

void MacroAssembler::push(Register src)
{
  str(src, Address(pre(esp, -1 * wordSize)));
}

void MacroAssembler::pop(Register dst)
{
  ldr(dst, Address(post(esp, 1 * wordSize)));
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  int off = offset();
  ldrh(dst, src);
  return off;
}

int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  int off = offset();
  ldrb(dst, src);
  return off;
}

int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off = offset();
  ldrsh(dst, src);
  return off;
}

int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off = offset();
  ldrsb(dst, src);
  return off;
}

int MacroAssembler::load_signed_short32(Register dst, Address src) {
  int off = offset();
  ldrshw(dst, src);
  return off;
}

int MacroAssembler::load_signed_byte32(Register dst, Address src) {
  int off = offset();
  ldrsbw(dst, src);
  return off;
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
  case 8:  ldr(dst, src); break;
  case 4:  ldrw(dst, src); break;
  case 2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case 1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
  case 8:  str(src, dst); break;
  case 4:  strw(src, dst); break;
  case 2:  strh(src, dst); break;
  case 1:  strb(src, dst); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::decrementw(Register reg, int value)
{
  if (value < 0)  { incrementw(reg, -value);      return; }
  if (value == 0) {                               return; }
  if (value < (1 << 12)) { subw(reg, reg, value); return; }
  /* else */ {
    guarantee(reg != rscratch2, "invalid dst for register decrement");
    movw(rscratch2, (unsigned)value);
    subw(reg, reg, rscratch2);
  }
}

void MacroAssembler::decrement(Register reg, int value)
{
  if (value < 0)  { increment(reg, -value);      return; }
  if (value == 0) {                              return; }
  if (value < (1 << 12)) { sub(reg, reg, value); return; }
  /* else */ {
    assert(reg != rscratch2, "invalid dst for register decrement");
    mov(rscratch2, (unsigned long)value);
    sub(reg, reg, rscratch2);
  }
}

void MacroAssembler::decrementw(Address dst, int value)
{
  assert(!dst.uses(rscratch1), "invalid dst for address decrement");
  ldrw(rscratch1, dst);
  decrementw(rscratch1, value);
  strw(rscratch1, dst);
}

void MacroAssembler::decrement(Address dst, int value)
{
  assert(!dst.uses(rscratch1), "invalid address for decrement");
  ldr(rscratch1, dst);
  decrement(rscratch1, value);
  str(rscratch1, dst);
}

void MacroAssembler::incrementw(Register reg, int value)
{
  if (value < 0)  { decrementw(reg, -value);      return; }
  if (value == 0) {                               return; }
  if (value < (1 << 12)) { addw(reg, reg, value); return; }
  /* else */ {
    assert(reg != rscratch2, "invalid dst for register increment");
    movw(rscratch2, (unsigned)value);
    addw(reg, reg, rscratch2);
  }
}

void MacroAssembler::increment(Register reg, int value)
{
  if (value < 0)  { decrement(reg, -value);      return; }
  if (value == 0) {                              return; }
  if (value < (1 << 12)) { add(reg, reg, value); return; }
  /* else */ {
    assert(reg != rscratch2, "invalid dst for register increment");
    movw(rscratch2, (unsigned)value);
    add(reg, reg, rscratch2);
  }
}

void MacroAssembler::incrementw(Address dst, int value)
{
  assert(!dst.uses(rscratch1), "invalid dst for address increment");
  ldrw(rscratch1, dst);
  incrementw(rscratch1, value);
  strw(rscratch1, dst);
}

void MacroAssembler::increment(Address dst, int value)
{
  assert(!dst.uses(rscratch1), "invalid dst for address increment");
  ldr(rscratch1, dst);
  increment(rscratch1, value);
  str(rscratch1, dst);
}


void MacroAssembler::pusha() {
  push(0x7fffffff, sp);
}

void MacroAssembler::popa() {
  pop(0x7fffffff, sp);
}

// Push lots of registers in the bit set supplied.  Don't push sp.
// Return the number of words pushed
int MacroAssembler::push(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;  // Only push an even number of regs

  if (count) {
    stp(as_Register(regs[0]), as_Register(regs[1]),
        Address(pre(stack, -count * wordSize)));
    words_pushed += 2;
  }
  for (int i = 2; i < count; i += 2) {
    stp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}

int MacroAssembler::pop(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;

  for (int i = 2; i < count; i += 2) {
    ldp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_pushed += 2;
  }
  if (count) {
    ldp(as_Register(regs[0]), as_Register(regs[1]),
        Address(post(stack, count * wordSize)));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}
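// Illustrative sketch (kept out of the build): the pairing scan used by the
// bitset push/pop above -- collect the set registers, pad an odd set with
// zr (register encoding 31) so every stp/ldp gets a pair, and push an even
// count. The helper name is hypothetical.
#if 0
static int collect_pairs(unsigned int bitset, unsigned char regs[32]) {
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (bitset & 1) regs[count++] = reg;
    bitset >>= 1;
  }
  if (count & 1) regs[count++] = 31;  // zr pads an odd set
  return count & ~1;                  // even number of regs to push
}
#endif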
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
  assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
    cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
    br(Assembler::EQ, ok);
    stop(msg);
    bind(ok);
    pop(1 << rscratch1->encoding(), sp);
  }
#endif
}
#endif

void MacroAssembler::stop(const char* msg, Label *l) {
  address ip = pc();
  pusha();
  mov(c_rarg0, (address)msg);
  if (! l) {
    adr(c_rarg1, (address)ip);
  } else {
    adr(c_rarg1, *l);
  }
  mov(c_rarg2, sp);
  mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
  // call(c_rarg3);
  blrt(c_rarg3, 3, 0, 1);
  hlt(0);
}

// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                                           add_sub_imm_insn insn1,
                                           add_sub_reg_insn insn2) {
  assert(Rd != zr, "Rd = zr and not setting flags?");
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    if (uabs(imm) < (1 << 24)) {
      (this->*insn1)(Rd, Rn, imm & -(1 << 12));
      (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
    } else {
      assert_different_registers(Rd, Rn);
      mov(Rd, (uint64_t)imm);
      (this->*insn2)(Rd, Rn, Rd, LSL, 0);
    }
  }
}
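// Illustrative sketch (kept out of the build): the two-instruction split
// used above when |imm| fits in 24 bits. AArch64 add/sub immediates are
// 12 bits (optionally shifted left by 12), so the high part goes in one
// instruction and the low part in a second. Sketch for a positive imm;
// the helper name is hypothetical.
#if 0
static void split_imm24(unsigned imm, unsigned& hi, unsigned& lo) {
  hi = imm & -(1 << 12);       // first insn:  add Rd, Rn, #hi  (shifted form)
  lo = imm & ((1 << 12) - 1);  // second insn: add Rd, Rd, #lo
}
#endif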
// Separate version which sets the flags.  Optimisations are more restricted
// because we must set the flags correctly.
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                                             add_sub_imm_insn insn1,
                                             add_sub_reg_insn insn2) {
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    assert_different_registers(Rd, Rn);
    assert(Rd != zr, "overflow in immediate operand");
    mov(Rd, (uint64_t)imm);
    (this->*insn2)(Rd, Rn, Rd, LSL, 0);
  }
}


void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    add(Rd, Rn, increment.as_register());
  } else {
    add(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    addw(Rd, Rn, increment.as_register());
  } else {
    addw(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, Universe::narrow_ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// register+offset Address.
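// Illustrative sketch (kept out of the build): the contract implemented
// below, expressed with C++11 atomics. Success falls through to `succeed`;
// on failure the value found in memory is left in `oldv`, mirroring x86
// cmpxchg's update of rax. The helper name is hypothetical.
#if 0
#include <atomic>
static bool cmpxchgptr_model(std::atomic<intptr_t>* addr,
                             intptr_t& oldv, intptr_t newv) {
  intptr_t expected = oldv;
  bool ok = addr->compare_exchange_strong(expected, newv,
                                          std::memory_order_acq_rel);
  if (!ok) oldv = expected;  // report the memory word on failure
  return ok;
}
#endif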
void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                                Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  if (UseLSE) {
    mov(tmp, oldv);
    casal(Assembler::xword, oldv, newv, addr);
    cmp(tmp, oldv);
    br(Assembler::EQ, succeed);
    membar(AnyAny);
  } else {
    Label retry_load, nope;
    if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
      prfm(Address(addr), PSTL1STRM);
    bind(retry_load);
    // flush and load exclusive from the memory location
    // and fail if it is not what we expect
    ldaxr(tmp, addr);
    cmp(tmp, oldv);
    br(Assembler::NE, nope);
    // if we store+flush with no intervening write tmp will be zero
    stlxr(tmp, newv, addr);
    cbzw(tmp, succeed);
    // retry so we only ever return after a load fails to compare
    // ensures we don't return a stale value after a failed write.
    b(retry_load);
    // if the memory word differs we return it in oldv and signal a fail
    bind(nope);
    membar(AnyAny);
    mov(oldv, tmp);
  }
  if (fail)
    b(*fail);
}

void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                              Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  // tmp returns 0/1 for success/failure
  if (UseLSE) {
    mov(tmp, oldv);
    casal(Assembler::word, oldv, newv, addr);
    cmp(tmp, oldv);
    br(Assembler::EQ, succeed);
    membar(AnyAny);
  } else {
    Label retry_load, nope;
    if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
      prfm(Address(addr), PSTL1STRM);
    bind(retry_load);
    // flush and load exclusive from the memory location
    // and fail if it is not what we expect
    ldaxrw(tmp, addr);
    cmp(tmp, oldv);
    br(Assembler::NE, nope);
    // if we store+flush with no intervening write tmp will be zero
    stlxrw(tmp, newv, addr);
    cbzw(tmp, succeed);
    // retry so we only ever return after a load fails to compare
    // ensures we don't return a stale value after a failed write.
    b(retry_load);
    // if the memory word differs we return it in oldv and signal a fail
    bind(nope);
    membar(AnyAny);
    mov(oldv, tmp);
  }
  if (fail)
    b(*fail);
}

// A generic CAS; success or failure is in the EQ flag.  A weak CAS
// doesn't retry and may fail spuriously.  If the oldval is wanted,
// pass a register for the result, otherwise pass noreg.

// Clobbers rscratch1
void MacroAssembler::cmpxchg(Register addr, Register expected,
                             Register new_val,
                             enum operand_size size,
                             bool acquire, bool release,
                             bool weak,
                             Register result) {
  if (result == noreg)  result = rscratch1;
  if (UseLSE) {
    mov(result, expected);
    lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
    cmp(result, expected);
  } else {
    BLOCK_COMMENT("cmpxchg {");
    Label retry_load, done;
    if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
      prfm(Address(addr), PSTL1STRM);
    bind(retry_load);
    load_exclusive(result, addr, size, acquire);
    if (size == xword)
      cmp(result, expected);
    else
      cmpw(result, expected);
    br(Assembler::NE, done);
    store_exclusive(rscratch1, new_val, addr, size, release);
    if (weak) {
      cmpw(rscratch1, 0u);  // If the store fails, return NE to our caller.
    } else {
      cbnzw(rscratch1, retry_load);
    }
    bind(done);
    BLOCK_COMMENT("} cmpxchg");
  }
}
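// Illustrative sketch (kept out of the build): the weak/strong distinction
// above matches the one C++11 draws between compare_exchange_weak and
// compare_exchange_strong -- a weak CAS may fail spuriously (here, when the
// store-exclusive loses its reservation), so callers supply the retry loop.
// The helper name is hypothetical.
#if 0
#include <atomic>
static long fetch_or_model(std::atomic<long>* addr, long bits) {
  long old = addr->load(std::memory_order_relaxed);
  while (!addr->compare_exchange_weak(old, old | bits)) {
    // `old` was refreshed with the current memory value; just retry
  }
  return old;
}
#endif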
void MacroAssembler::cmpxchg_oop_shenandoah(Register res, Register addr, Register expected,
                                            Register new_val,
                                            bool narrow,
                                            bool acquire, bool release,
                                            Register tmp1, Register tmp2) {
  assert(UseShenandoahGC, "only for shenandoah");
  assert_different_registers(res, addr, expected, new_val, tmp1, tmp2);

  Label retry, done, fail;

  mov(res, 0);

  // CAS, using LL/SC pair.
  bind(retry);
  load_exclusive(tmp1, addr, narrow ? word : xword, true);
  if (narrow) cmpw(tmp1, expected);
  else cmp(tmp1, expected);
  br(Assembler::NE, fail);
  store_exclusive(tmp2, new_val, addr, narrow ? word : xword, true);
  cbnzw(tmp2, retry);
  mov(res, 1);
  b(done);

  bind(fail);
  // Check if rb(expected)==rb(tmp1)
  // Shuffle registers so that we have memory value ready for next expected.
  mov(tmp2, expected);
  mov(expected, tmp1);
  if (narrow) {
    decode_heap_oop(tmp1, tmp1);
    decode_heap_oop(tmp2, tmp2);
  }
  oopDesc::bs()->interpreter_read_barrier(this, tmp1);
  oopDesc::bs()->interpreter_read_barrier(this, tmp2);
  cmp(tmp1, tmp2);
  // Retry with expected now being the value we just loaded from addr.
  br(Assembler::EQ, retry);

  bind(done);
  membar(AnyAny);
}

static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    if (incr.is_register()) {                                           \
      AOP(sz, incr.as_register(), prev, addr);                          \
    } else {                                                            \
      mov(rscratch2, incr.as_constant());                               \
      AOP(sz, rscratch2, prev, addr);                                   \
    }                                                                   \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
    prfm(Address(addr), PSTL1STRM);                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch2, rscratch1, addr);                                     \
  cbnzw(rscratch2, retry_load);                                         \
  if (prev->is_valid() && prev != result) {                             \
    IOP(prev, rscratch1, incr);                                         \
  }                                                                     \
}

ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)

#undef ATOMIC_OP
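// Illustrative note (kept out of the build): in the LL/SC case, one
// instantiation such as ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, ...)
// expands to roughly the loop sketched below; when `prev` is valid but had
// to double as the load target, the IOP (here sub) recovers the pre-update
// value from the post-update one.
#if 0
// bind(retry_load);
// ldxr(result, addr);               // load-exclusive the old value
// add(rscratch1, result, incr);     // compute the updated value
// stxr(rscratch2, rscratch1, addr); // try to publish it
// cbnzw(rscratch2, retry_load);     // non-zero status => lost reservation
// sub(prev, rscratch1, incr);       // IOP: recover old value if needed
#endif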
#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    AOP(sz, newv, prev, addr);                                          \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))         \
    prfm(Address(addr), PSTL1STRM);                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG

void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
    thread = rthread;
  }
  assert(t1->is_valid(), "need temp reg");

  ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    add(t1, t1, var_size_in_bytes);
  } else {
    add(t1, t1, con_size_in_bytes);
  }
  str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif

    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016lx", regs[0]);
      tty->print_cr(" r1 = 0x%016lx", regs[1]);
      tty->print_cr(" r2 = 0x%016lx", regs[2]);
      tty->print_cr(" r3 = 0x%016lx", regs[3]);
      tty->print_cr(" r4 = 0x%016lx", regs[4]);
      tty->print_cr(" r5 = 0x%016lx", regs[5]);
      tty->print_cr(" r6 = 0x%016lx", regs[6]);
      tty->print_cr(" r7 = 0x%016lx", regs[7]);
      tty->print_cr(" r8 = 0x%016lx", regs[8]);
      tty->print_cr(" r9 = 0x%016lx", regs[9]);
      tty->print_cr("r10 = 0x%016lx", regs[10]);
      tty->print_cr("r11 = 0x%016lx", regs[11]);
      tty->print_cr("r12 = 0x%016lx", regs[12]);
      tty->print_cr("r13 = 0x%016lx", regs[13]);
      tty->print_cr("r14 = 0x%016lx", regs[14]);
      tty->print_cr("r15 = 0x%016lx", regs[15]);
      tty->print_cr("r16 = 0x%016lx", regs[16]);
      tty->print_cr("r17 = 0x%016lx", regs[17]);
      tty->print_cr("r18 = 0x%016lx", regs[18]);
      tty->print_cr("r19 = 0x%016lx", regs[19]);
      tty->print_cr("r20 = 0x%016lx", regs[20]);
      tty->print_cr("r21 = 0x%016lx", regs[21]);
      tty->print_cr("r22 = 0x%016lx", regs[22]);
      tty->print_cr("r23 = 0x%016lx", regs[23]);
      tty->print_cr("r24 = 0x%016lx", regs[24]);
      tty->print_cr("r25 = 0x%016lx", regs[25]);
      tty->print_cr("r26 = 0x%016lx", regs[26]);
      tty->print_cr("r27 = 0x%016lx", regs[27]);
      tty->print_cr("r28 = 0x%016lx", regs[28]);
      tty->print_cr("r30 = 0x%016lx", regs[30]);
      tty->print_cr("r31 = 0x%016lx", regs[31]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    os::print_location(tty, pc, true);
    // A good place for a breakpoint:
    asm volatile("nop" : : "r"(pc), "r"(regs));
    assert(false, "DEBUG MESSAGE: %s", msg);
  }
}

#ifdef BUILTIN_SIM
// routine to generate an x86 prolog for a stub function which
// bootstraps into the generated ARM code which directly follows the
// stub
//
// the argument encodes the number of general and fp registers
// passed by the caller and the calling convention (currently just
// the number of general registers and assumes C argument passing)

extern "C" {
int aarch64_stub_prolog_size();
void aarch64_stub_prolog();
void aarch64_prolog();
}

void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type,
                                   address *prolog_ptr)
{
  int calltype = (((ret_type & 0x3) << 8) |
                  ((fp_arg_count & 0xf) << 4) |
                  (gp_arg_count & 0xf));

  // the addresses for the x86 to ARM entry code we need to use
  address start = pc();
  // printf("start = %lx\n", start);
  int byteCount = aarch64_stub_prolog_size();
  // printf("byteCount = %x\n", byteCount);
  int instructionCount = (byteCount + 3) / 4;
  // printf("instructionCount = %x\n", instructionCount);
  for (int i = 0; i < instructionCount; i++) {
    nop();
  }

  memcpy(start, (void*)aarch64_stub_prolog, byteCount);

  // write the address of the setup routine and the call format at the
  // end of the copied code
  u_int64_t *patch_end = (u_int64_t *)(start + byteCount);
  if (prolog_ptr)
    patch_end[-2] = (u_int64_t)prolog_ptr;
  patch_end[-1] = calltype;
}
#endif

void MacroAssembler::push_call_clobbered_registers() {
  push(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp);

  // Push v0-v7, v16-v31.
  for (int i = 30; i >= 0; i -= 2) {
    if (i <= v7->encoding() || i >= v16->encoding()) {
      stpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(pre(sp, -2 * wordSize)));
    }
  }
}

void MacroAssembler::pop_call_clobbered_registers() {

  for (int i = 0; i < 32; i += 2) {
    if (i <= v7->encoding() || i >= v16->encoding()) {
      ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(post(sp, 2 * wordSize)));
    }
  }

  pop(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp);
}

void MacroAssembler::push_CPU_state(bool save_vectors) {
  push(0x3fffffff, sp);  // integer registers except lr & sp

  if (!save_vectors) {
    for (int i = 30; i >= 0; i -= 2)
      stpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(pre(sp, -2 * wordSize)));
  } else {
    for (int i = 30; i >= 0; i -= 2)
      stpq(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(pre(sp, -4 * wordSize)));
  }
}

void MacroAssembler::pop_CPU_state(bool restore_vectors) {
  if (!restore_vectors) {
    for (int i = 0; i < 32; i += 2)
      ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(post(sp, 2 * wordSize)));
  } else {
    for (int i = 0; i < 32; i += 2)
      ldpq(as_FloatRegister(i), as_FloatRegister(i+1),
           Address(post(sp, 4 * wordSize)));
  }

  pop(0x3fffffff, sp);  // integer registers except lr & sp
}

/**
 * Helpers for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}
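// Illustrative sketch (kept out of the build): how spill_address peels an
// out-of-range offset into an add onto the base register plus a residual
// that the load/store can encode (9-bit signed when unaligned, 12-bit
// unsigned scaled when aligned). The helper name is hypothetical.
#if 0
static int split_spill_offset(int size, int offset, int& base_adjust) {
  base_adjust = 0;
  if ((offset & (size - 1)) && offset >= (1 << 8)) {
    base_adjust += offset & ((1 << 12) - 1);  // peel the low 12 bits
    offset &= ~((1 << 12) - 1);
  }
  if (offset >= (1 << 12) * size) {
    base_adjust += offset & (((1 << 12) - 1) << 12);  // peel the next 12
    offset &= ~(((1 << 12) - 1) << 12);
  }
  return offset;  // remaining offset is encodable in the instruction
}
#endif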
/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32);  // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32);  // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx);  // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);  // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32);  // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}
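// Illustrative sketch (kept out of the build): the mul/umulh/adds/adc
// cluster above performs a 64x64->128 multiply-accumulate. With the
// GCC/Clang 128-bit extension the same step is:
#if 0
static unsigned long mac64(unsigned long x, unsigned long y,
                           unsigned long& carry) {
  unsigned __int128 product = (unsigned __int128)x * y + carry;
  carry = (unsigned long)(product >> 64);  // umulh + adc
  return (unsigned long)product;           // mul + adds
}
#endif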
2605 * 2606 */ 2607 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 2608 Register carry, Register carry2, 2609 Register idx, Register jdx, 2610 Register yz_idx1, Register yz_idx2, 2611 Register tmp, Register tmp3, Register tmp4, 2612 Register tmp6, Register product_hi) { 2613 2614 // jlong carry, x[], y[], z[]; 2615 // int kdx = ystart+1; 2616 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 2617 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 2618 // jlong carry2 = (jlong)(tmp3 >>> 64); 2619 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 2620 // carry = (jlong)(tmp4 >>> 64); 2621 // z[kdx+idx+1] = (jlong)tmp3; 2622 // z[kdx+idx] = (jlong)tmp4; 2623 // } 2624 // idx += 2; 2625 // if (idx > 0) { 2626 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 2627 // z[kdx+idx] = (jlong)yz_idx1; 2628 // carry = (jlong)(yz_idx1 >>> 64); 2629 // } 2630 // 2631 2632 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 2633 2634 lsrw(jdx, idx, 2); 2635 2636 bind(L_third_loop); 2637 2638 subsw(jdx, jdx, 1); 2639 br(Assembler::MI, L_third_loop_exit); 2640 subw(idx, idx, 4); 2641 2642 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2643 2644 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 2645 2646 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2647 2648 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 2649 ror(yz_idx2, yz_idx2, 32); 2650 2651 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 2652 2653 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2654 umulh(tmp4, product_hi, yz_idx1); 2655 2656 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 2657 ror(rscratch2, rscratch2, 32); 2658 2659 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 2660 umulh(carry2, product_hi, yz_idx2); 2661 2662 // propagate sum of both multiplications into carry:tmp4:tmp3 2663 adds(tmp3, tmp3, carry); 2664 adc(tmp4, tmp4, zr); 2665 adds(tmp3, tmp3, rscratch1); 2666 adcs(tmp4, tmp4, tmp); 2667 adc(carry, carry2, zr); 2668 adds(tmp4, tmp4, rscratch2); 2669 adc(carry, carry, zr); 2670 2671 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 2672 ror(tmp4, tmp4, 32); 2673 stp(tmp4, tmp3, Address(tmp6, 0)); 2674 2675 b(L_third_loop); 2676 bind (L_third_loop_exit); 2677 2678 andw (idx, idx, 0x3); 2679 cbz(idx, L_post_third_loop_done); 2680 2681 Label L_check_1; 2682 subsw(idx, idx, 2); 2683 br(Assembler::MI, L_check_1); 2684 2685 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2686 ldr(yz_idx1, Address(rscratch1, 0)); 2687 ror(yz_idx1, yz_idx1, 32); 2688 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2689 umulh(tmp4, product_hi, yz_idx1); 2690 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2691 ldr(yz_idx2, Address(rscratch1, 0)); 2692 ror(yz_idx2, yz_idx2, 32); 2693 2694 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 2695 2696 ror(tmp3, tmp3, 32); 2697 str(tmp3, Address(rscratch1, 0)); 2698 2699 bind (L_check_1); 2700 2701 andw (idx, idx, 0x1); 2702 subsw(idx, idx, 1); 2703 br(Assembler::MI, L_post_third_loop_done); 2704 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2705 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 2706 umulh(carry2, tmp4, product_hi); 2707 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2708 2709 add2_with_carry(carry2, tmp3, tmp4, carry); 2710 2711 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2712 
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: zlen
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);   // idx = ylen;
  movw(kdx, zlen);   // kdx = xlen+ylen;
  mov(carry, zr);    // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
2792 // 2793 // for (int i = xstart-1; i >= 0; i--) { // Second loop 2794 // carry = 0; 2795 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 2796 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 2797 // (z[k] & LONG_MASK) + carry; 2798 // z[k] = (int)product; 2799 // carry = product >>> 32; 2800 // } 2801 // z[i] = (int)carry; 2802 // } 2803 // 2804 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 2805 2806 const Register jdx = tmp1; 2807 2808 bind(L_second_loop); 2809 mov(carry, zr); // carry = 0; 2810 movw(jdx, ylen); // j = ystart+1 2811 2812 subsw(xstart, xstart, 1); // i = xstart-1; 2813 br(Assembler::MI, L_done); 2814 2815 str(z, Address(pre(sp, -4 * wordSize))); 2816 2817 Label L_last_x; 2818 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 2819 subsw(xstart, xstart, 1); // i = xstart-1; 2820 br(Assembler::MI, L_last_x); 2821 2822 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 2823 ldr(product_hi, Address(rscratch1)); 2824 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 2825 2826 Label L_third_loop_prologue; 2827 bind(L_third_loop_prologue); 2828 2829 str(ylen, Address(sp, wordSize)); 2830 stp(x, xstart, Address(sp, 2 * wordSize)); 2831 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 2832 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 2833 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 2834 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 2835 2836 addw(tmp3, xlen, 1); 2837 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2838 subsw(tmp3, tmp3, 1); 2839 br(Assembler::MI, L_done); 2840 2841 lsr(carry, carry, 32); 2842 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2843 b(L_second_loop); 2844 2845 // Next infrequent code is moved outside loops. 2846 bind(L_last_x); 2847 ldrw(product_hi, Address(x, 0)); 2848 b(L_third_loop_prologue); 2849 2850 bind(L_done); 2851 } 2852 2853 /** 2854 * Emits code to update CRC-32 with a byte value according to constants in table 2855 * 2856 * @param [in,out]crc Register containing the crc. 2857 * @param [in]val Register containing the byte to fold into the CRC. 2858 * @param [in]table Register containing the table of crc constants. 2859 * 2860 * uint32_t crc; 2861 * val = crc_table[(val ^ crc) & 0xFF]; 2862 * crc = val ^ (crc >> 8); 2863 * 2864 */ 2865 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 2866 eor(val, val, crc); 2867 andr(val, val, 0xff); 2868 ldrw(val, Address(table, val, Address::lsl(2))); 2869 eor(crc, val, crc, Assembler::LSR, 8); 2870 } 2871 2872 /** 2873 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 2874 * 2875 * @param [in,out]crc Register containing the crc. 2876 * @param [in]v Register containing the 32-bit to fold into the CRC. 2877 * @param [in]table0 Register containing table 0 of crc constants. 2878 * @param [in]table1 Register containing table 1 of crc constants. 2879 * @param [in]table2 Register containing table 2 of crc constants. 2880 * @param [in]table3 Register containing table 3 of crc constants. 2881 * 2882 * uint32_t crc; 2883 * v = crc ^ v 2884 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 2885 * 2886 */ 2887 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 2888 Register table0, Register table1, Register table2, Register table3, 2889 bool upper) { 2890 eor(v, crc, v, upper ? 
/**
 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]v         Register containing the 32-bit to fold into the CRC.
 * @param [in]table0    Register containing table 0 of crc constants.
 * @param [in]table1    Register containing table 1 of crc constants.
 * @param [in]table2    Register containing table 2 of crc constants.
 * @param [in]table3    Register containing table 3 of crc constants.
 *
 * uint32_t crc;
 *   v = crc ^ v
 *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
 *
 */
void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
        Register table0, Register table1, Register table2, Register table3,
        bool upper) {
  eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
  uxtb(tmp, v);
  ldrw(crc, Address(table3, tmp, Address::lsl(2)));
  ubfx(tmp, v, 8, 8);
  ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 16, 8);
  ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
  ubfx(tmp, v, 24, 8);
  ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
  eor(crc, crc, tmp);
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
  unsigned long offset;

  ornw(crc, zr, crc);

  if (UseCRC32) {
    Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop;

    subs(len, len, 64);
    br(Assembler::GE, CRC_by64_loop);
    adds(len, len, 64-4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

    BIND(CRC_by4_loop);
    ldrw(tmp, Address(post(buf, 4)));
    subs(len, len, 4);
    crc32w(crc, crc, tmp);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::LE, L_exit);
    BIND(CRC_by1_loop);
    ldrb(tmp, Address(post(buf, 1)));
    subs(len, len, 1);
    crc32b(crc, crc, tmp);
    br(Assembler::GT, CRC_by1_loop);
    b(L_exit);

    align(CodeEntryAlignment);
    BIND(CRC_by64_loop);
    subs(len, len, 64);
    ldp(tmp, tmp3, Address(post(buf, 16)));
    crc32x(crc, crc, tmp);
    crc32x(crc, crc, tmp3);
    ldp(tmp, tmp3, Address(post(buf, 16)));
    crc32x(crc, crc, tmp);
    crc32x(crc, crc, tmp3);
    ldp(tmp, tmp3, Address(post(buf, 16)));
    crc32x(crc, crc, tmp);
    crc32x(crc, crc, tmp3);
    ldp(tmp, tmp3, Address(post(buf, 16)));
    crc32x(crc, crc, tmp);
    crc32x(crc, crc, tmp3);
    br(Assembler::GE, CRC_by64_loop);
    adds(len, len, 64-4);
    br(Assembler::GE, CRC_by4_loop);
    adds(len, len, 4);
    br(Assembler::GT, CRC_by1_loop);
    BIND(L_exit);
    ornw(crc, zr, crc);
    return;
  }

  adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) add(table0, table0, offset);
  add(table1, table0, 1*256*sizeof(juint));
  add(table2, table0, 2*256*sizeof(juint));
  add(table3, table0, 3*256*sizeof(juint));

  if (UseNeon) {
    cmp(len, 64);
    br(Assembler::LT, L_by16);
    eor(v16, T16B, v16, v16);

    Label L_fold;

    add(tmp, table0, 4*256*sizeof(juint));  // Point at the Neon constants

    ld1(v0, v1, T2D, post(buf, 32));
    ld1r(v4, T2D, post(tmp, 8));
    ld1r(v5, T2D, post(tmp, 8));
    ld1r(v6, T2D, post(tmp, 8));
    ld1r(v7, T2D, post(tmp, 8));
    mov(v16, T4S, 0, crc);

    eor(v0, T16B, v0, v16);
    sub(len, len, 64);

    BIND(L_fold);
    pmull(v22, T8H, v0, v5, T8B);
    pmull(v20, T8H, v0, v7, T8B);
    pmull(v23, T8H, v0, v4, T8B);
    pmull(v21, T8H, v0, v6, T8B);

    pmull2(v18, T8H, v0, v5, T16B);
    pmull2(v16, T8H, v0, v7, T16B);
    pmull2(v19, T8H, v0, v4, T16B);
    pmull2(v17, T8H, v0, v6, T16B);

    uzp1(v24, v20, v22, T8H);
    uzp2(v25, v20, v22, T8H);
    eor(v20, T16B, v24, v25);

    uzp1(v26, v16, v18, T8H);
    uzp2(v27, v16, v18, T8H);
    eor(v16, T16B, v26, v27);

    ushll2(v22, T4S, v20, T8H, 8);
    ushll(v20, T4S, v20, T4H, 8);

    ushll2(v18, T4S, v16, T8H, 8);
    ushll(v16, T4S, v16, T4H, 8);

    eor(v22, T16B, v23, v22);
    eor(v18, T16B, v19, v18);
    eor(v20, T16B, v21, v20);
    eor(v16, T16B, v17, v16);

    uzp1(v17, v16, v20, T2D);
    uzp2(v21, v16, v20, T2D);
    eor(v17, T16B, v17, v21);

    ushll2(v20, T2D, v17, T4S, 16);
    ushll(v16, T2D, v17, T2S, 16);

    eor(v20, T16B, v20, v22);
    eor(v16, T16B, v16, v18);

    uzp1(v17, v20, v16, T2D);
    uzp2(v21, v20, v16, T2D);
    eor(v28, T16B, v17, v21);

    pmull(v22, T8H, v1, v5, T8B);
    pmull(v20, T8H, v1, v7, T8B);
    pmull(v23, T8H, v1, v4, T8B);
    pmull(v21, T8H, v1, v6, T8B);

    pmull2(v18, T8H, v1, v5, T16B);
    pmull2(v16, T8H, v1, v7, T16B);
    pmull2(v19, T8H, v1, v4, T16B);
    pmull2(v17, T8H, v1, v6, T16B);

    ld1(v0, v1, T2D, post(buf, 32));

    uzp1(v24, v20, v22, T8H);
    uzp2(v25, v20, v22, T8H);
    eor(v20, T16B, v24, v25);

    uzp1(v26, v16, v18, T8H);
    uzp2(v27, v16, v18, T8H);
    eor(v16, T16B, v26, v27);

    ushll2(v22, T4S, v20, T8H, 8);
    ushll(v20, T4S, v20, T4H, 8);

    ushll2(v18, T4S, v16, T8H, 8);
    ushll(v16, T4S, v16, T4H, 8);

    eor(v22, T16B, v23, v22);
    eor(v18, T16B, v19, v18);
    eor(v20, T16B, v21, v20);
    eor(v16, T16B, v17, v16);

    uzp1(v17, v16, v20, T2D);
    uzp2(v21, v16, v20, T2D);
    eor(v16, T16B, v17, v21);

    ushll2(v20, T2D, v16, T4S, 16);
    ushll(v16, T2D, v16, T2S, 16);

    eor(v20, T16B, v22, v20);
    eor(v16, T16B, v16, v18);

    uzp1(v17, v20, v16, T2D);
    uzp2(v21, v20, v16, T2D);
    eor(v20, T16B, v17, v21);

    shl(v16, T2D, v28, 1);
    shl(v17, T2D, v20, 1);

    eor(v0, T16B, v0, v16);
    eor(v1, T16B, v1, v17);

    subs(len, len, 32);
    br(Assembler::GE, L_fold);

    mov(crc, 0);
    mov(tmp, v0, T1D, 0);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v0, T1D, 1);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v1, T1D, 0);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
    mov(tmp, v1, T1D, 1);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
    update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);

    add(len, len, 32);
  }

  BIND(L_by16);
  subs(len, len, 16);
  br(Assembler::GE, L_by16_loop);
  adds(len, len, 16-4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, L_by1_loop);
  b(L_exit);

  BIND(L_by4_loop);
  ldrw(tmp, Address(post(buf, 4)));
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
  subs(len, len, 4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(L_by1_loop);
  subs(len, len, 1);
  ldrb(tmp, Address(post(buf, 1)));
  update_byte_crc32(crc, tmp, table0);
  br(Assembler::GT, L_by1_loop);
  b(L_exit);

  align(CodeEntryAlignment);
  BIND(L_by16_loop);
  subs(len, len, 16);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
  update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
  update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
  update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
  br(Assembler::GE, L_by16_loop);
  adds(len, len, 16-4);
  br(Assembler::GE, L_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, L_by1_loop);
  BIND(L_exit);
  ornw(crc, zr, crc);
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register that will contain address of CRC table
 * @param tmp   scratch register
 */
void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3) {
  Label L_exit;
  Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop;

  subs(len, len, 64);
  br(Assembler::GE, CRC_by64_loop);
  adds(len, len, 64-4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  BIND(CRC_by4_loop);
  ldrw(tmp, Address(post(buf, 4)));
  subs(len, len, 4);
  crc32cw(crc, crc, tmp);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::LE, L_exit);
  BIND(CRC_by1_loop);
  ldrb(tmp, Address(post(buf, 1)));
  subs(len, len, 1);
  crc32cb(crc, crc, tmp);
  br(Assembler::GT, CRC_by1_loop);
  b(L_exit);

  align(CodeEntryAlignment);
  BIND(CRC_by64_loop);
  subs(len, len, 64);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  crc32cx(crc, crc, tmp);
  crc32cx(crc, crc, tmp3);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  crc32cx(crc, crc, tmp);
  crc32cx(crc, crc, tmp3);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  crc32cx(crc, crc, tmp);
  crc32cx(crc, crc, tmp3);
  ldp(tmp, tmp3, Address(post(buf, 16)));
  crc32cx(crc, crc, tmp);
  crc32cx(crc, crc, tmp3);
  br(Assembler::GE, CRC_by64_loop);
  adds(len, len, 64-4);
  br(Assembler::GE, CRC_by4_loop);
  adds(len, len, 4);
  br(Assembler::GT, CRC_by1_loop);
  BIND(L_exit);
  return;
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  unsigned long offset;
  _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
  _masm->ldrb(rscratch1, Address(rscratch1, offset));
  _masm->cbzw(rscratch1, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

void MacroAssembler::addptr(const Address &dst, int32_t src) {
  Address adr;
  switch(dst.getMode()) {
  case Address::base_plus_offset:
    // This is the expected mode, although we allow all the other
    // forms below.
    adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
    break;
  default:
    lea(rscratch2, dst);
    adr = Address(rscratch2);
    break;
  }
  ldr(rscratch1, adr);
  add(rscratch1, rscratch1, src);
  str(rscratch1, adr);
}

void MacroAssembler::cmpptr(Register src1, Address src2) {
  unsigned long offset;
  adrp(rscratch1, src2, offset);
  ldr(rscratch1, Address(rscratch1, offset));
  cmp(src1, rscratch1);
}

void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}

void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj.  The content of
  // register obj is destroyed afterwards.

  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension,
         "Wrong barrier set kind");

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  lsr(obj, obj, CardTableModRefBS::card_shift);

  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");

  load_byte_map_base(rscratch1);

  if (UseCondCardMark) {
    Label L_already_dirty;
    membar(StoreLoad);
    ldrb(rscratch2, Address(obj, rscratch1));
    cbz(rscratch2, L_already_dirty);
    strb(zr, Address(obj, rscratch1));
    bind(L_already_dirty);
  } else {
    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
      membar(StoreStore);
    }
    strb(zr, Address(obj, rscratch1));
  }
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::load_mirror(Register dst, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(rmethod, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
  ldr(dst, Address(dst, mirror_offset));
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (Universe::narrow_klass_base() == NULL) {
      cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
      return;
    } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
               && Universe::narrow_klass_shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::load_prototype_header(Register dst, Register src) {
  load_klass(dst, src);
  ldr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release?  Concurrent GCs assume
  // klass length is valid if klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(s, "broken oop in encode_heap_oop");
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
    Label nonnull;
    cbnz(r, nonnull);
    sub(r, r, rheapbase);
    bind(nonnull);
    lsr(r, r, LogMinObjAlignmentInBytes);
    */
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(r, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  if (Universe::narrow_oop_base() != NULL) {
    sub(r, r, rheapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    lsr(r, r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    cbnz(src, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");

  Register data = src;
  if (Universe::narrow_oop_base() != NULL) {
    sub(dst, src, rheapbase);
    data = dst;
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    lsr(dst, data, LogMinObjAlignmentInBytes);
    data = dst;
  }
  if (data == src)
    mov(dst, src);
}

void MacroAssembler::decode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0 || d != s) {
      lsl(d, s, Universe::narrow_oop_shift());
    }
  } else {
    Label done;
    if (d != s)
      mov(d, s);
    cbz(s, done);
    add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
    bind(done);
  }
  verify_oop(d, "broken oop in decode_heap_oop");
}
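// Illustrative sketch (kept out of the build): the compressed-oop
// arithmetic above in scalar form, for the non-NULL-base case. A NULL oop
// encodes to and decodes from zero (the subs/csel pair and the cbz above),
// mirroring the algorithm in oop.inline.hpp. Helper names are hypothetical.
#if 0
static uint32_t encode_oop(uint64_t oop, uint64_t heap_base, int shift) {
  return oop == 0 ? 0 : (uint32_t)((oop - heap_base) >> shift);
}
static uint64_t decode_oop(uint32_t narrow, uint64_t heap_base, int shift) {
  return narrow == 0 ? 0 : heap_base + ((uint64_t)narrow << shift);
}
#endif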
compressed headers"); 3437 assert (Universe::heap() != NULL, "java heap should be initialized"); 3438 // Cannot assert, unverified entry point counts instructions (see .ad file) 3439 // vtableStubs also counts instructions in pd_code_size_limit. 3440 // Also do not verify_oop as this is called by verify_oop. 3441 if (Universe::narrow_oop_shift() != 0) { 3442 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3443 if (Universe::narrow_oop_base() != NULL) { 3444 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3445 } else { 3446 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3447 } 3448 } else { 3449 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3450 } 3451 } 3452 3453 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 3454 assert (UseCompressedOops, "should only be used for compressed headers"); 3455 assert (Universe::heap() != NULL, "java heap should be initialized"); 3456 // Cannot assert, unverified entry point counts instructions (see .ad file) 3457 // vtableStubs also counts instructions in pd_code_size_limit. 3458 // Also do not verify_oop as this is called by verify_oop. 3459 if (Universe::narrow_oop_shift() != 0) { 3460 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3461 if (Universe::narrow_oop_base() != NULL) { 3462 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3463 } else { 3464 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3465 } 3466 } else { 3467 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3468 if (dst != src) { 3469 mov(dst, src); 3470 } 3471 } 3472 } 3473 3474 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3475 if (Universe::narrow_klass_base() == NULL) { 3476 if (Universe::narrow_klass_shift() != 0) { 3477 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3478 lsr(dst, src, LogKlassAlignmentInBytes); 3479 } else { 3480 if (dst != src) mov(dst, src); 3481 } 3482 return; 3483 } 3484 3485 if (use_XOR_for_compressed_class_base) { 3486 if (Universe::narrow_klass_shift() != 0) { 3487 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3488 lsr(dst, dst, LogKlassAlignmentInBytes); 3489 } else { 3490 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3491 } 3492 return; 3493 } 3494 3495 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3496 && Universe::narrow_klass_shift() == 0) { 3497 movw(dst, src); 3498 return; 3499 } 3500 3501 #ifdef ASSERT 3502 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 3503 #endif 3504 3505 Register rbase = dst; 3506 if (dst == src) rbase = rheapbase; 3507 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3508 sub(dst, src, rbase); 3509 if (Universe::narrow_klass_shift() != 0) { 3510 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3511 lsr(dst, dst, LogKlassAlignmentInBytes); 3512 } 3513 if (dst == src) reinit_heapbase(); 3514 } 3515 3516 void MacroAssembler::encode_klass_not_null(Register r) { 3517 encode_klass_not_null(r, r); 3518 } 3519 3520 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3521 Register rbase = dst; 3522 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3523 3524 if (Universe::narrow_klass_base() == NULL) { 3525 if (Universe::narrow_klass_shift() != 0) { 3526 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), 
"decode alg wrong"); 3527 lsl(dst, src, LogKlassAlignmentInBytes); 3528 } else { 3529 if (dst != src) mov(dst, src); 3530 } 3531 return; 3532 } 3533 3534 if (use_XOR_for_compressed_class_base) { 3535 if (Universe::narrow_klass_shift() != 0) { 3536 lsl(dst, src, LogKlassAlignmentInBytes); 3537 eor(dst, dst, (uint64_t)Universe::narrow_klass_base()); 3538 } else { 3539 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3540 } 3541 return; 3542 } 3543 3544 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3545 && Universe::narrow_klass_shift() == 0) { 3546 if (dst != src) 3547 movw(dst, src); 3548 movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32); 3549 return; 3550 } 3551 3552 // Cannot assert, unverified entry point counts instructions (see .ad file) 3553 // vtableStubs also counts instructions in pd_code_size_limit. 3554 // Also do not verify_oop as this is called by verify_oop. 3555 if (dst == src) rbase = rheapbase; 3556 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3557 if (Universe::narrow_klass_shift() != 0) { 3558 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3559 add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); 3560 } else { 3561 add(dst, rbase, src); 3562 } 3563 if (dst == src) reinit_heapbase(); 3564 } 3565 3566 void MacroAssembler::decode_klass_not_null(Register r) { 3567 decode_klass_not_null(r, r); 3568 } 3569 3570 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 3571 assert (UseCompressedOops, "should only be used for compressed oops"); 3572 assert (Universe::heap() != NULL, "java heap should be initialized"); 3573 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3574 3575 int oop_index = oop_recorder()->find_index(obj); 3576 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3577 3578 InstructionMark im(this); 3579 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3580 code_section()->relocate(inst_mark(), rspec); 3581 movz(dst, 0xDEAD, 16); 3582 movk(dst, 0xBEEF); 3583 } 3584 3585 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 3586 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3587 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3588 int index = oop_recorder()->find_index(k); 3589 assert(! Universe::heap()->is_in_reserved(k), "should not be an oop"); 3590 3591 InstructionMark im(this); 3592 RelocationHolder rspec = metadata_Relocation::spec(index); 3593 code_section()->relocate(inst_mark(), rspec); 3594 narrowKlass nk = Klass::encode_klass(k); 3595 movz(dst, (nk >> 16), 16); 3596 movk(dst, nk & 0xffff); 3597 } 3598 3599 void MacroAssembler::load_heap_oop(Register dst, Address src) 3600 { 3601 if (UseCompressedOops) { 3602 ldrw(dst, src); 3603 decode_heap_oop(dst); 3604 } else { 3605 ldr(dst, src); 3606 } 3607 } 3608 3609 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) 3610 { 3611 if (UseCompressedOops) { 3612 ldrw(dst, src); 3613 decode_heap_oop_not_null(dst); 3614 } else { 3615 ldr(dst, src); 3616 } 3617 } 3618 3619 void MacroAssembler::store_heap_oop(Address dst, Register src) { 3620 shenandoah_store_check(src, dst); 3621 if (UseCompressedOops) { 3622 assert(!dst.uses(src), "not enough registers"); 3623 encode_heap_oop(src); 3624 strw(src, dst); 3625 } else 3626 str(src, dst); 3627 } 3628 3629 // Used for storing NULLs. 
3630 void MacroAssembler::store_heap_oop_null(Address dst) { 3631 if (UseCompressedOops) { 3632 strw(zr, dst); 3633 } else 3634 str(zr, dst); 3635 } 3636 3637 #if INCLUDE_ALL_GCS 3638 void MacroAssembler::g1_write_barrier_pre(Register obj, 3639 Register pre_val, 3640 Register thread, 3641 Register tmp, 3642 bool tosca_live, 3643 bool expand_call) { 3644 // If expand_call is true then we expand the call_VM_leaf macro 3645 // directly to skip generating the check by 3646 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 3647 3648 assert(thread == rthread, "must be"); 3649 3650 Label done; 3651 Label runtime; 3652 3653 assert(pre_val != noreg, "check this code"); 3654 3655 if (obj != noreg) 3656 assert_different_registers(obj, pre_val, tmp); 3657 3658 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 3659 SATBMarkQueue::byte_offset_of_active())); 3660 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 3661 SATBMarkQueue::byte_offset_of_index())); 3662 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 3663 SATBMarkQueue::byte_offset_of_buf())); 3664 3665 3666 // Is marking active? 3667 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 3668 ldrw(tmp, in_progress); 3669 } else { 3670 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); 3671 ldrb(tmp, in_progress); 3672 } 3673 cbzw(tmp, done); 3674 3675 // Do we need to load the previous value? 3676 if (obj != noreg) { 3677 load_heap_oop(pre_val, Address(obj, 0)); 3678 } 3679 3680 // Is the previous value null? 3681 cbz(pre_val, done); 3682 3683 // Can we store the original value in the thread's buffer? 3684 // Is index == 0? 3685 // (The index field is typed as size_t.) 3686 3687 ldr(tmp, index); // tmp := *index_adr 3688 cbz(tmp, runtime); // tmp == 0? 3689 // If yes, goto runtime 3690 3691 sub(tmp, tmp, wordSize); // tmp := tmp - wordSize 3692 str(tmp, index); // *index_adr := tmp 3693 ldr(rscratch1, buffer); 3694 add(tmp, tmp, rscratch1); // tmp := tmp + *buffer_adr 3695 3696 // Record the previous value 3697 str(pre_val, Address(tmp, 0)); 3698 b(done); 3699 3700 bind(runtime); 3701 // save the live input values 3702 push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp); 3703 3704 // Calling the runtime using the regular call_VM_leaf mechanism generates 3705 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base) 3706 // that checks that *(rfp+frame::interpreter_frame_last_sp) == NULL. 3707 // 3708 // If we are generating the pre-barrier without a frame (e.g. in the 3709 // intrinsified Reference.get() routine) then rfp might be pointing to 3710 // the caller frame and so this check will most likely fail at runtime. 3711 // 3712 // Expanding the call directly bypasses the generation of the check. 3713 // So when we do not have a full interpreter frame on the stack 3714 // expand_call should be passed true.
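// For reference, the code emitted above corresponds roughly to the
// following C sketch (the queue accessors here are illustrative, not
// the exact VM declarations):
//
//   void satb_pre_barrier(oop* field, oop pre_val, JavaThread* thread) {
//     SATBMarkQueue* q = &thread->satb_mark_queue();
//     if (!q->is_active()) return;             // marking not in progress
//     if (pre_val == NULL) pre_val = *field;   // load the previous value
//     if (pre_val == NULL) return;             // nothing to record
//     if (q->index() == 0) {                   // buffer full -> slow path
//       runtime_pre_barrier(pre_val, thread);  // SharedRuntime::g1_wb_pre
//     } else {
//       q->set_index(q->index() - wordSize);   // index counts down in bytes
//       *(oop*)((char*)q->buf() + q->index()) = pre_val;
//     }
//   }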
3715 3716 if (expand_call) { 3717 assert(pre_val != c_rarg1, "smashed arg"); 3718 pass_arg1(this, thread); 3719 pass_arg0(this, pre_val); 3720 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2); 3721 } else { 3722 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread); 3723 } 3724 3725 pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp); 3726 3727 bind(done); 3728 } 3729 3730 void MacroAssembler::g1_write_barrier_post(Register store_addr, 3731 Register new_val, 3732 Register thread, 3733 Register tmp, 3734 Register tmp2) { 3735 assert(thread == rthread, "must be"); 3736 3737 if (UseShenandoahGC) { 3738 // No need for this in Shenandoah. 3739 return; 3740 } 3741 3742 assert(UseG1GC, "expect G1 GC"); 3743 3744 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + 3745 DirtyCardQueue::byte_offset_of_index())); 3746 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + 3747 DirtyCardQueue::byte_offset_of_buf())); 3748 3749 BarrierSet* bs = Universe::heap()->barrier_set(); 3750 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 3751 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 3752 3753 Label done; 3754 Label runtime; 3755 3756 // Does the store cross heap regions? 3757 3758 eor(tmp, store_addr, new_val); 3759 lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes); 3760 cbz(tmp, done); 3761 3762 // crosses regions, storing NULL? 3763 3764 cbz(new_val, done); 3765 3766 // storing region crossing non-NULL, is card already dirty? 3767 3768 ExternalAddress cardtable((address) ct->byte_map_base); 3769 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 3770 const Register card_addr = tmp; 3771 3772 lsr(card_addr, store_addr, CardTableModRefBS::card_shift); 3773 3774 // get the address of the card 3775 load_byte_map_base(tmp2); 3776 add(card_addr, card_addr, tmp2); 3777 ldrb(tmp2, Address(card_addr)); 3778 cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val()); 3779 br(Assembler::EQ, done); 3780 3781 assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0"); 3782 3783 membar(Assembler::StoreLoad); 3784 3785 ldrb(tmp2, Address(card_addr)); 3786 cbzw(tmp2, done); 3787 3788 // storing a region crossing, non-NULL oop, card is clean. 3789 // dirty card and log. 3790 3791 strb(zr, Address(card_addr)); 3792 3793 ldr(rscratch1, queue_index); 3794 cbz(rscratch1, runtime); 3795 sub(rscratch1, rscratch1, wordSize); 3796 str(rscratch1, queue_index); 3797 3798 ldr(tmp2, buffer); 3799 str(card_addr, Address(tmp2, rscratch1)); 3800 b(done); 3801 3802 bind(runtime); 3803 // save the live input values 3804 push(store_addr->bit(true) | new_val->bit(true), sp); 3805 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); 3806 pop(store_addr->bit(true) | new_val->bit(true), sp); 3807 3808 bind(done); 3809 } 3810 3811 #endif // INCLUDE_ALL_GCS 3812 3813 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 3814 assert(oop_recorder() != NULL, "this assembler needs a Recorder"); 3815 int index = oop_recorder()->allocate_metadata_index(obj); 3816 RelocationHolder rspec = metadata_Relocation::spec(index); 3817 return Address((address)obj, rspec); 3818 } 3819 3820 // Move an oop into a register. immediate is true if we want 3821 // immediate instructions, i.e. we are not going to patch this 3822 // instruction while the code is being executed by another thread.
In 3823 // that case we can use move immediates rather than the constant pool. 3824 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { 3825 int oop_index; 3826 if (obj == NULL) { 3827 oop_index = oop_recorder()->allocate_oop_index(obj); 3828 } else { 3829 oop_index = oop_recorder()->find_index(obj); 3830 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3831 } 3832 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3833 if (! immediate) { 3834 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 3835 ldr_constant(dst, Address(dummy, rspec)); 3836 } else 3837 mov(dst, Address((address)obj, rspec)); 3838 } 3839 3840 // Move a metadata address into a register. 3841 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 3842 int oop_index; 3843 if (obj == NULL) { 3844 oop_index = oop_recorder()->allocate_metadata_index(obj); 3845 } else { 3846 oop_index = oop_recorder()->find_index(obj); 3847 } 3848 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 3849 mov(dst, Address((address)obj, rspec)); 3850 } 3851 3852 Address MacroAssembler::constant_oop_address(jobject obj) { 3853 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3854 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop"); 3855 int oop_index = oop_recorder()->find_index(obj); 3856 return Address((address)obj, oop_Relocation::spec(oop_index)); 3857 } 3858 3859 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3860 void MacroAssembler::tlab_allocate(Register obj, 3861 Register var_size_in_bytes, 3862 int con_size_in_bytes, 3863 Register t1, 3864 Register t2, 3865 Label& slow_case) { 3866 assert_different_registers(obj, t2); 3867 assert_different_registers(obj, var_size_in_bytes); 3868 Register end = t2; 3869 3870 // verify_tlab(); 3871 3872 int oop_extra_words = Universe::heap()->oop_extra_words(); 3873 3874 ldr(obj, Address(rthread, JavaThread::tlab_top_offset())); 3875 if (var_size_in_bytes == noreg) { 3876 lea(end, Address(obj, con_size_in_bytes + oop_extra_words * HeapWordSize)); 3877 } else { 3878 if (oop_extra_words > 0) { 3879 add(var_size_in_bytes, var_size_in_bytes, oop_extra_words * HeapWordSize); 3880 } 3881 lea(end, Address(obj, var_size_in_bytes)); 3882 } 3883 ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset())); 3884 cmp(end, rscratch1); 3885 br(Assembler::HI, slow_case); 3886 3887 // update the tlab top pointer 3888 str(end, Address(rthread, JavaThread::tlab_top_offset())); 3889 3890 Universe::heap()->compile_prepare_oop(this, obj); 3891 3892 // recover var_size_in_bytes if necessary 3893 if (var_size_in_bytes == end) { 3894 sub(var_size_in_bytes, var_size_in_bytes, obj); 3895 } 3896 // verify_tlab(); 3897 } 3898 3899 // Preserves r19, and r3. 3900 Register MacroAssembler::tlab_refill(Label& retry, 3901 Label& try_eden, 3902 Label& slow_case) { 3903 Register top = r0; 3904 Register t1 = r2; 3905 Register t2 = r4; 3906 assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3); 3907 Label do_refill, discard_tlab; 3908 3909 if (!Universe::heap()->supports_inline_contig_alloc()) { 3910 // No allocation in the shared eden. 
3911 b(slow_case); 3912 } 3913 3914 ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 3915 ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 3916 3917 // calculate amount of free space 3918 sub(t1, t1, top); 3919 lsr(t1, t1, LogHeapWordSize); 3920 3921 // Retain tlab and allocate object in shared space if 3922 // the amount free in the tlab is too large to discard. 3923 3924 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3925 cmp(t1, rscratch1); 3926 br(Assembler::LE, discard_tlab); 3927 3928 // Retain 3929 // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3930 mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment()); 3931 add(rscratch1, rscratch1, t2); 3932 str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3933 3934 if (TLABStats) { 3935 // increment number of slow_allocations 3936 addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())), 3937 1, rscratch1); 3938 } 3939 b(try_eden); 3940 3941 bind(discard_tlab); 3942 if (TLABStats) { 3943 // increment number of refills 3944 addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1, 3945 rscratch1); 3946 // accumulate wastage -- t1 is amount free in tlab 3947 addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1, 3948 rscratch1); 3949 } 3950 3951 // if the tlab is currently allocated (top or end != null) then 3952 // fill [top, end + alignment_reserve) with an array object 3953 cbz(top, do_refill); 3954 3955 // set up the mark word 3956 mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); 3957 str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes())); 3958 // set the length to the remaining space 3959 sub(t1, t1, typeArrayOopDesc::header_size(T_INT)); 3960 add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve()); 3961 lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint))); 3962 strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes())); 3963 // set klass to intArrayKlass 3964 { 3965 unsigned long offset; 3966 // dubious reloc; why not an oop reloc? 3967 adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()), 3968 offset); 3969 ldr(t1, Address(rscratch1, offset)); 3970 } 3971 // store klass last. Concurrent GCs assume the klass length is valid 3972 // if the klass field is not null. 3973 store_klass(top, t1); 3974 3975 mov(t1, top); 3976 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 3977 sub(t1, t1, rscratch1); 3978 incr_allocated_bytes(rthread, t1, 0, rscratch1); 3979 3980 // refill the tlab with an eden allocation 3981 bind(do_refill); 3982 ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset()))); 3983 lsl(t1, t1, LogHeapWordSize); 3984 // allocate new tlab, address returned in top 3985 eden_allocate(top, t1, 0, t2, slow_case); 3986 3987 // Check that t1 was preserved in eden_allocate.
3988 #ifdef ASSERT 3989 if (UseTLAB) { 3990 Label ok; 3991 Register tsize = r4; 3992 assert_different_registers(tsize, rthread, t1); 3993 str(tsize, Address(pre(sp, -16))); 3994 ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset()))); 3995 lsl(tsize, tsize, LogHeapWordSize); 3996 cmp(t1, tsize); 3997 br(Assembler::EQ, ok); 3998 STOP("assert(t1 != tlab size)"); 3999 should_not_reach_here(); 4000 4001 bind(ok); 4002 ldr(tsize, Address(post(sp, 16))); 4003 } 4004 #endif 4005 str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 4006 str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 4007 add(top, top, t1); 4008 sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); 4009 str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 4010 verify_tlab(); 4011 b(retry); 4012 4013 return rthread; // for use by caller 4014 } 4015 4016 // Defines obj, preserves var_size_in_bytes 4017 void MacroAssembler::eden_allocate(Register obj, 4018 Register var_size_in_bytes, 4019 int con_size_in_bytes, 4020 Register t1, 4021 Label& slow_case) { 4022 assert_different_registers(obj, var_size_in_bytes, t1); 4023 if (!Universe::heap()->supports_inline_contig_alloc()) { 4024 b(slow_case); 4025 } else { 4026 Register end = t1; 4027 Register heap_end = rscratch2; 4028 Label retry; 4029 bind(retry); 4030 { 4031 unsigned long offset; 4032 adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset); 4033 ldr(heap_end, Address(rscratch1, offset)); 4034 } 4035 4036 ExternalAddress heap_top((address) Universe::heap()->top_addr()); 4037 4038 // Get the current top of the heap 4039 { 4040 unsigned long offset; 4041 adrp(rscratch1, heap_top, offset); 4042 // Use add() here after ADRP, rather than lea(). 4043 // lea() does not generate anything if its offset is zero. 4044 // However, relocs expect to find either an ADD or a load/store 4045 // insn after an ADRP. add() always generates an ADD insn, even 4046 // for add(Rn, Rn, 0). 4047 add(rscratch1, rscratch1, offset); 4048 ldaxr(obj, rscratch1); 4049 } 4050 4051 // Adjust it by the size of our new object 4052 if (var_size_in_bytes == noreg) { 4053 lea(end, Address(obj, con_size_in_bytes)); 4054 } else { 4055 lea(end, Address(obj, var_size_in_bytes)); 4056 } 4057 4058 // if end < obj then we wrapped around high memory 4059 cmp(end, obj); 4060 br(Assembler::LO, slow_case); 4061 4062 cmp(end, heap_end); 4063 br(Assembler::HI, slow_case); 4064 4065 // If heap_top hasn't been changed by some other thread, update it.
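// stlxr writes 0 to rscratch2 on success, or non-zero if the exclusive
// monitor taken by the ldaxr above was lost to another thread, in which
// case we branch back and retry the whole allocation.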
4066 stlxr(rscratch2, end, rscratch1); 4067 cbnzw(rscratch2, retry); 4068 } 4069 } 4070 4071 void MacroAssembler::verify_tlab() { 4072 #ifdef ASSERT 4073 if (UseTLAB && VerifyOops) { 4074 Label next, ok; 4075 4076 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 4077 4078 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 4079 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 4080 cmp(rscratch2, rscratch1); 4081 br(Assembler::HS, next); 4082 STOP("assert(top >= start)"); 4083 should_not_reach_here(); 4084 4085 bind(next); 4086 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 4087 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 4088 cmp(rscratch2, rscratch1); 4089 br(Assembler::HS, ok); 4090 STOP("assert(top <= end)"); 4091 should_not_reach_here(); 4092 4093 bind(ok); 4094 ldp(rscratch2, rscratch1, Address(post(sp, 16))); 4095 } 4096 #endif 4097 } 4098 4099 // Writes to stack successive pages until offset reached to check for 4100 // stack overflow + shadow pages. This clobbers tmp. 4101 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 4102 assert_different_registers(tmp, size, rscratch1); 4103 mov(tmp, sp); 4104 // Bang stack for total size given plus shadow page size. 4105 // Bang one page at a time because a large size can bang beyond the 4106 // yellow and red zones. 4107 Label loop; 4108 mov(rscratch1, os::vm_page_size()); 4109 bind(loop); 4110 lea(tmp, Address(tmp, -os::vm_page_size())); 4111 subsw(size, size, rscratch1); 4112 str(size, Address(tmp)); 4113 br(Assembler::GT, loop); 4114 4115 // Bang down shadow pages too. 4116 // At this point, (tmp-0) is the last address touched, so don't 4117 // touch it again. (It was touched as (tmp-pagesize) but then tmp 4118 // was post-decremented.) Skip this address by starting at i=1, and 4119 // touch a few more pages below. N.B. It is important to touch all 4120 // the way down to and including i=StackShadowPages. 4121 for (int i = 0; i < (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) { 4122 // this could be any sized move but it can serve as a debugging crumb, 4123 // so the bigger the better.
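// For example, with 4K pages and an 8-page shadow zone this loop runs
// seven times, extending the probes from the page already touched above
// down through the remainder of the shadow region.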
4124 lea(tmp, Address(tmp, -os::vm_page_size())); 4125 str(size, Address(tmp)); 4126 } 4127 } 4128 4129 4130 address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) { 4131 unsigned long off; 4132 adrp(r, Address(page, rtype), off); 4133 InstructionMark im(this); 4134 code_section()->relocate(inst_mark(), rtype); 4135 ldrw(zr, Address(r, off)); 4136 return inst_mark(); 4137 } 4138 4139 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 4140 InstructionMark im(this); 4141 code_section()->relocate(inst_mark(), rtype); 4142 ldrw(zr, Address(r, 0)); 4143 return inst_mark(); 4144 } 4145 4146 void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) { 4147 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 4148 unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12; 4149 unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12; 4150 unsigned long dest_page = (unsigned long)dest.target() >> 12; 4151 long offset_low = dest_page - low_page; 4152 long offset_high = dest_page - high_page; 4153 4154 assert(is_valid_AArch64_address(dest.target()), "bad address"); 4155 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 4156 4157 InstructionMark im(this); 4158 code_section()->relocate(inst_mark(), dest.rspec()); 4159 // 8143067: Ensure that the adrp can reach the dest from anywhere within 4160 // the code cache so that if it is relocated we know it will still reach 4161 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 4162 _adrp(reg1, dest.target()); 4163 } else { 4164 unsigned long target = (unsigned long)dest.target(); 4165 unsigned long adrp_target 4166 = (target & 0xffffffffUL) | ((unsigned long)pc() & 0xffff00000000UL); 4167 4168 _adrp(reg1, (address)adrp_target); 4169 movk(reg1, target >> 32, 32); 4170 } 4171 byte_offset = (unsigned long)dest.target() & 0xfff; 4172 } 4173 4174 void MacroAssembler::load_byte_map_base(Register reg) { 4175 jbyte *byte_map_base = 4176 ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base; 4177 4178 if (is_valid_AArch64_address((address)byte_map_base)) { 4179 // Strictly speaking the byte_map_base isn't an address at all, 4180 // and it might even be negative. 4181 unsigned long offset; 4182 adrp(reg, ExternalAddress((address)byte_map_base), offset); 4183 // We expect offset to be zero with most collectors. 
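// (The card for an address A is the byte at byte_map_base + (A >> card_shift);
// with the usual 512-byte cards, card_shift == 9, each table byte covers
// 512 heap bytes, and byte_map_base is just the bias that makes that
// formula work.)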
4184 if (offset != 0) { 4185 add(reg, reg, offset); 4186 } 4187 } else { 4188 mov(reg, (uint64_t)byte_map_base); 4189 } 4190 } 4191 4192 void MacroAssembler::build_frame(int framesize) { 4193 assert(framesize > 0, "framesize must be > 0"); 4194 if (framesize < ((1 << 9) + 2 * wordSize)) { 4195 sub(sp, sp, framesize); 4196 stp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 4197 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize); 4198 } else { 4199 stp(rfp, lr, Address(pre(sp, -2 * wordSize))); 4200 if (PreserveFramePointer) mov(rfp, sp); 4201 if (framesize < ((1 << 12) + 2 * wordSize)) 4202 sub(sp, sp, framesize - 2 * wordSize); 4203 else { 4204 mov(rscratch1, framesize - 2 * wordSize); 4205 sub(sp, sp, rscratch1); 4206 } 4207 } 4208 } 4209 4210 void MacroAssembler::remove_frame(int framesize) { 4211 assert(framesize > 0, "framesize must be > 0"); 4212 if (framesize < ((1 << 9) + 2 * wordSize)) { 4213 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize)); 4214 add(sp, sp, framesize); 4215 } else { 4216 if (framesize < ((1 << 12) + 2 * wordSize)) 4217 add(sp, sp, framesize - 2 * wordSize); 4218 else { 4219 mov(rscratch1, framesize - 2 * wordSize); 4220 add(sp, sp, rscratch1); 4221 } 4222 ldp(rfp, lr, Address(post(sp, 2 * wordSize))); 4223 } 4224 } 4225 4226 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr); 4227 4228 // Search for str1 in str2 and return the index or -1 4229 void MacroAssembler::string_indexof(Register str2, Register str1, 4230 Register cnt2, Register cnt1, 4231 Register tmp1, Register tmp2, 4232 Register tmp3, Register tmp4, 4233 int icnt1, Register result, int ae) { 4234 Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH; 4235 4236 Register ch1 = rscratch1; 4237 Register ch2 = rscratch2; 4238 Register cnt1tmp = tmp1; 4239 Register cnt2tmp = tmp2; 4240 Register cnt1_neg = cnt1; 4241 Register cnt2_neg = cnt2; 4242 Register result_tmp = tmp4; 4243 4244 bool isL = ae == StrIntrinsicNode::LL; 4245 4246 bool str1_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL; 4247 bool str2_isL = ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::LU; 4248 int str1_chr_shift = str1_isL ? 0:1; 4249 int str2_chr_shift = str2_isL ? 0:1; 4250 int str1_chr_size = str1_isL ? 1:2; 4251 int str2_chr_size = str2_isL ? 1:2; 4252 chr_insn str1_load_1chr = str1_isL ? (chr_insn)&MacroAssembler::ldrb : 4253 (chr_insn)&MacroAssembler::ldrh; 4254 chr_insn str2_load_1chr = str2_isL ? (chr_insn)&MacroAssembler::ldrb : 4255 (chr_insn)&MacroAssembler::ldrh; 4256 chr_insn load_2chr = isL ? (chr_insn)&MacroAssembler::ldrh : (chr_insn)&MacroAssembler::ldrw; 4257 chr_insn load_4chr = isL ? (chr_insn)&MacroAssembler::ldrw : (chr_insn)&MacroAssembler::ldr; 4258 4259 // Note, inline_string_indexOf() generates checks: 4260 // if (substr.count > string.count) return -1; 4261 // if (substr.count == 0) return 0; 4262 4263 // We have two strings, a source string in str2, cnt2 and a pattern string 4264 // in str1, cnt1. Find the first occurrence of the pattern in the source or return -1. 4265 4266 // For a larger pattern and source we use a simplified Boyer-Moore algorithm. 4267 // With a small pattern and source we use a linear scan. 4268 4269 if (icnt1 == -1) { 4270 cmp(cnt1, 256); // Use Linear Scan if cnt1 < 8 || cnt1 >= 256 4271 ccmp(cnt1, 8, 0b0000, LO); // Can't handle skip >= 256 because we use 4272 br(LO, LINEARSEARCH); // a byte array.
4273 cmp(cnt1, cnt2, LSR, 2); // Source must be 4 * pattern for BM 4274 br(HS, LINEARSEARCH); 4275 } 4276 4277 // The Boyer-Moore algorithm is based on the description here:- 4278 // 4279 // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm 4280 // 4281 // This describes an algorithm with 2 shift rules. The 'Bad Character' rule 4282 // and the 'Good Suffix' rule. 4283 // 4284 // These rules are essentially heuristics for how far we can shift the 4285 // pattern along the search string. 4286 // 4287 // The implementation here uses the 'Bad Character' rule only because of the 4288 // complexity of initialisation for the 'Good Suffix' rule. 4289 // 4290 // This is also known as the Boyer-Moore-Horspool algorithm:- 4291 // 4292 // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm 4293 // 4294 // #define ASIZE 128 4295 // 4296 // int bm(unsigned char *x, int m, unsigned char *y, int n) { 4297 // int i, j; 4298 // unsigned c; 4299 // unsigned char bc[ASIZE]; 4300 // 4301 // /* Preprocessing */ 4302 // for (i = 0; i < ASIZE; ++i) 4303 // bc[i] = 0; 4304 // for (i = 0; i < m - 1; ) { 4305 // c = x[i]; 4306 // ++i; 4307 // if (c < ASIZE) bc[c] = i; 4308 // } 4309 // 4310 // /* Searching */ 4311 // j = 0; 4312 // while (j <= n - m) { 4313 // c = y[j+m-1]; 4314 // if (x[m-1] == c) { 4315 // for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i); 4316 // if (i < 0) return j; 4317 // } 4318 // if (c < ASIZE) 4319 // j = j - bc[c] + m; 4320 // else 4321 // j += 1; // Advance by 1 only if char >= ASIZE 4322 // } 4323 // } 4324 if (icnt1 == -1) { 4325 BIND(BM); 4326 4327 Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP; 4328 Label BMADV, BMMATCH, BMCHECKEND; 4329 4330 Register cnt1end = tmp2; 4331 Register str2end = cnt2; 4332 Register skipch = tmp2; 4333 4334 // Restrict ASIZE to 128 to reduce stack space/initialisation. 4335 // The presence of chars >= ASIZE in the target string does not affect 4336 // performance, but we must be careful not to initialise them in the stack 4337 // array. 4338 // The presence of chars >= ASIZE in the source string may adversely affect 4339 // performance since we can only advance by one when we encounter one.
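// Worked example of the bad-character table for the Latin-1 pattern
// x = "abcab" (m == 5): the preprocessing loop leaves bc['a'] = 4,
// bc['b'] = 2, bc['c'] = 3 (the 1-based position of each character's
// last occurrence among x[0..m-2]) and every other entry 0.  If the
// last character of the current window is 'c' we shift by
// m - bc['c'] == 2, and by the full m == 5 for any character that does
// not occur in the pattern at all.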
4340 4341 stp(zr, zr, pre(sp, -128)); 4342 for (int i = 1; i < 8; i++) 4343 stp(zr, zr, Address(sp, i*16)); 4344 4345 mov(cnt1tmp, 0); 4346 sub(cnt1end, cnt1, 1); 4347 BIND(BCLOOP); 4348 (this->*str1_load_1chr)(ch1, Address(str1, cnt1tmp, Address::lsl(str1_chr_shift))); 4349 cmp(ch1, 128); 4350 add(cnt1tmp, cnt1tmp, 1); 4351 br(HS, BCSKIP); 4352 strb(cnt1tmp, Address(sp, ch1)); 4353 BIND(BCSKIP); 4354 cmp(cnt1tmp, cnt1end); 4355 br(LT, BCLOOP); 4356 4357 mov(result_tmp, str2); 4358 4359 sub(cnt2, cnt2, cnt1); 4360 add(str2end, str2, cnt2, LSL, str2_chr_shift); 4361 BIND(BMLOOPSTR2); 4362 sub(cnt1tmp, cnt1, 1); 4363 (this->*str1_load_1chr)(ch1, Address(str1, cnt1tmp, Address::lsl(str1_chr_shift))); 4364 (this->*str2_load_1chr)(skipch, Address(str2, cnt1tmp, Address::lsl(str2_chr_shift))); 4365 cmp(ch1, skipch); 4366 br(NE, BMSKIP); 4367 subs(cnt1tmp, cnt1tmp, 1); 4368 br(LT, BMMATCH); 4369 BIND(BMLOOPSTR1); 4370 (this->*str1_load_1chr)(ch1, Address(str1, cnt1tmp, Address::lsl(str1_chr_shift))); 4371 (this->*str2_load_1chr)(ch2, Address(str2, cnt1tmp, Address::lsl(str2_chr_shift))); 4372 cmp(ch1, ch2); 4373 br(NE, BMSKIP); 4374 subs(cnt1tmp, cnt1tmp, 1); 4375 br(GE, BMLOOPSTR1); 4376 BIND(BMMATCH); 4377 sub(result, str2, result_tmp); 4378 if (!str2_isL) lsr(result, result, 1); 4379 add(sp, sp, 128); 4380 b(DONE); 4381 BIND(BMADV); 4382 add(str2, str2, str2_chr_size); 4383 b(BMCHECKEND); 4384 BIND(BMSKIP); 4385 cmp(skipch, 128); 4386 br(HS, BMADV); 4387 ldrb(ch2, Address(sp, skipch)); 4388 add(str2, str2, cnt1, LSL, str2_chr_shift); 4389 sub(str2, str2, ch2, LSL, str2_chr_shift); 4390 BIND(BMCHECKEND); 4391 cmp(str2, str2end); 4392 br(LE, BMLOOPSTR2); 4393 add(sp, sp, 128); 4394 b(NOMATCH); 4395 } 4396 4397 BIND(LINEARSEARCH); 4398 { 4399 Label DO1, DO2, DO3; 4400 4401 Register str2tmp = tmp2; 4402 Register first = tmp3; 4403 4404 if (icnt1 == -1) 4405 { 4406 Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT; 4407 4408 cmp(cnt1, str1_isL == str2_isL ? 
4 : 2); 4409 br(LT, DOSHORT); 4410 4411 sub(cnt2, cnt2, cnt1); 4412 mov(result_tmp, cnt2); 4413 4414 lea(str1, Address(str1, cnt1, Address::lsl(str1_chr_shift))); 4415 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4416 sub(cnt1_neg, zr, cnt1, LSL, str1_chr_shift); 4417 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4418 (this->*str1_load_1chr)(first, Address(str1, cnt1_neg)); 4419 4420 BIND(FIRST_LOOP); 4421 (this->*str2_load_1chr)(ch2, Address(str2, cnt2_neg)); 4422 cmp(first, ch2); 4423 br(EQ, STR1_LOOP); 4424 BIND(STR2_NEXT); 4425 adds(cnt2_neg, cnt2_neg, str2_chr_size); 4426 br(LE, FIRST_LOOP); 4427 b(NOMATCH); 4428 4429 BIND(STR1_LOOP); 4430 adds(cnt1tmp, cnt1_neg, str1_chr_size); 4431 add(cnt2tmp, cnt2_neg, str2_chr_size); 4432 br(GE, MATCH); 4433 4434 BIND(STR1_NEXT); 4435 (this->*str1_load_1chr)(ch1, Address(str1, cnt1tmp)); 4436 (this->*str2_load_1chr)(ch2, Address(str2, cnt2tmp)); 4437 cmp(ch1, ch2); 4438 br(NE, STR2_NEXT); 4439 adds(cnt1tmp, cnt1tmp, str1_chr_size); 4440 add(cnt2tmp, cnt2tmp, str2_chr_size); 4441 br(LT, STR1_NEXT); 4442 b(MATCH); 4443 4444 BIND(DOSHORT); 4445 if (str1_isL == str2_isL) { 4446 cmp(cnt1, 2); 4447 br(LT, DO1); 4448 br(GT, DO3); 4449 } 4450 } 4451 4452 if (icnt1 == 4) { 4453 Label CH1_LOOP; 4454 4455 (this->*load_4chr)(ch1, str1); 4456 sub(cnt2, cnt2, 4); 4457 mov(result_tmp, cnt2); 4458 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4459 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4460 4461 BIND(CH1_LOOP); 4462 (this->*load_4chr)(ch2, Address(str2, cnt2_neg)); 4463 cmp(ch1, ch2); 4464 br(EQ, MATCH); 4465 adds(cnt2_neg, cnt2_neg, str2_chr_size); 4466 br(LE, CH1_LOOP); 4467 b(NOMATCH); 4468 } 4469 4470 if ((icnt1 == -1 && str1_isL == str2_isL) || icnt1 == 2) { 4471 Label CH1_LOOP; 4472 4473 BIND(DO2); 4474 (this->*load_2chr)(ch1, str1); 4475 sub(cnt2, cnt2, 2); 4476 mov(result_tmp, cnt2); 4477 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4478 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4479 4480 BIND(CH1_LOOP); 4481 (this->*load_2chr)(ch2, Address(str2, cnt2_neg)); 4482 cmp(ch1, ch2); 4483 br(EQ, MATCH); 4484 adds(cnt2_neg, cnt2_neg, str2_chr_size); 4485 br(LE, CH1_LOOP); 4486 b(NOMATCH); 4487 } 4488 4489 if ((icnt1 == -1 && str1_isL == str2_isL) || icnt1 == 3) { 4490 Label FIRST_LOOP, STR2_NEXT, STR1_LOOP; 4491 4492 BIND(DO3); 4493 (this->*load_2chr)(first, str1); 4494 (this->*str1_load_1chr)(ch1, Address(str1, 2*str1_chr_size)); 4495 4496 sub(cnt2, cnt2, 3); 4497 mov(result_tmp, cnt2); 4498 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4499 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4500 4501 BIND(FIRST_LOOP); 4502 (this->*load_2chr)(ch2, Address(str2, cnt2_neg)); 4503 cmpw(first, ch2); 4504 br(EQ, STR1_LOOP); 4505 BIND(STR2_NEXT); 4506 adds(cnt2_neg, cnt2_neg, str2_chr_size); 4507 br(LE, FIRST_LOOP); 4508 b(NOMATCH); 4509 4510 BIND(STR1_LOOP); 4511 add(cnt2tmp, cnt2_neg, 2*str2_chr_size); 4512 (this->*str2_load_1chr)(ch2, Address(str2, cnt2tmp)); 4513 cmp(ch1, ch2); 4514 br(NE, STR2_NEXT); 4515 b(MATCH); 4516 } 4517 4518 if (icnt1 == -1 || icnt1 == 1) { 4519 Label CH1_LOOP, HAS_ZERO; 4520 Label DO1_SHORT, DO1_LOOP; 4521 4522 BIND(DO1); 4523 (this->*str1_load_1chr)(ch1, str1); 4524 cmp(cnt2, 8); 4525 br(LT, DO1_SHORT); 4526 4527 if (str2_isL) { 4528 if (!str1_isL) { 4529 tst(ch1, 0xff00); 4530 br(NE, NOMATCH); 4531 } 4532 orr(ch1, ch1, ch1, LSL, 8); 4533 } 4534 orr(ch1, ch1, ch1, LSL, 16); 4535 orr(ch1, ch1, ch1, LSL, 32); 4536 4537 sub(cnt2, cnt2, 8/str2_chr_size); 4538 
mov(result_tmp, cnt2); 4539 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4540 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4541 4542 mov(tmp3, str2_isL ? 0x0101010101010101 : 0x0001000100010001); 4543 BIND(CH1_LOOP); 4544 ldr(ch2, Address(str2, cnt2_neg)); 4545 eor(ch2, ch1, ch2); 4546 sub(tmp1, ch2, tmp3); 4547 orr(tmp2, ch2, str2_isL ? 0x7f7f7f7f7f7f7f7f : 0x7fff7fff7fff7fff); 4548 bics(tmp1, tmp1, tmp2); 4549 br(NE, HAS_ZERO); 4550 adds(cnt2_neg, cnt2_neg, 8); 4551 br(LT, CH1_LOOP); 4552 4553 cmp(cnt2_neg, 8); 4554 mov(cnt2_neg, 0); 4555 br(LT, CH1_LOOP); 4556 b(NOMATCH); 4557 4558 BIND(HAS_ZERO); 4559 rev(tmp1, tmp1); 4560 clz(tmp1, tmp1); 4561 add(cnt2_neg, cnt2_neg, tmp1, LSR, 3); 4562 b(MATCH); 4563 4564 BIND(DO1_SHORT); 4565 mov(result_tmp, cnt2); 4566 lea(str2, Address(str2, cnt2, Address::lsl(str2_chr_shift))); 4567 sub(cnt2_neg, zr, cnt2, LSL, str2_chr_shift); 4568 BIND(DO1_LOOP); 4569 (this->*str2_load_1chr)(ch2, Address(str2, cnt2_neg)); 4570 cmpw(ch1, ch2); 4571 br(EQ, MATCH); 4572 adds(cnt2_neg, cnt2_neg, str2_chr_size); 4573 br(LT, DO1_LOOP); 4574 } 4575 } 4576 BIND(NOMATCH); 4577 mov(result, -1); 4578 b(DONE); 4579 BIND(MATCH); 4580 add(result, result_tmp, cnt2_neg, ASR, str2_chr_shift); 4581 BIND(DONE); 4582 } 4583 4584 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr); 4585 typedef void (MacroAssembler::* uxt_insn)(Register Rd, Register Rn); 4586 4587 // Compare strings. 4588 void MacroAssembler::string_compare(Register str1, Register str2, 4589 Register cnt1, Register cnt2, Register result, 4590 Register tmp1, 4591 FloatRegister vtmp, FloatRegister vtmpZ, int ae) { 4592 Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING, 4593 NEXT_WORD, DIFFERENCE; 4594 4595 bool isLL = ae == StrIntrinsicNode::LL; 4596 bool isLU = ae == StrIntrinsicNode::LU; 4597 bool isUL = ae == StrIntrinsicNode::UL; 4598 4599 bool str1_isL = isLL || isLU; 4600 bool str2_isL = isLL || isUL; 4601 4602 int str1_chr_shift = str1_isL ? 0 : 1; 4603 int str2_chr_shift = str2_isL ? 0 : 1; 4604 int str1_chr_size = str1_isL ? 1 : 2; 4605 int str2_chr_size = str2_isL ? 1 : 2; 4606 4607 chr_insn str1_load_chr = str1_isL ? (chr_insn)&MacroAssembler::ldrb : 4608 (chr_insn)&MacroAssembler::ldrh; 4609 chr_insn str2_load_chr = str2_isL ? (chr_insn)&MacroAssembler::ldrb : 4610 (chr_insn)&MacroAssembler::ldrh; 4611 uxt_insn ext_chr = isLL ? (uxt_insn)&MacroAssembler::uxtbw : 4612 (uxt_insn)&MacroAssembler::uxthw; 4613 4614 BLOCK_COMMENT("string_compare {"); 4615 4616 // Bizarrely, the counts are passed in bytes, regardless of whether they 4617 // are L or U strings; the result, however, is always in characters. 4618 if (!str1_isL) asrw(cnt1, cnt1, 1); 4619 if (!str2_isL) asrw(cnt2, cnt2, 1); 4620 4621 // Compute the minimum of the string lengths and save the difference. 4622 subsw(tmp1, cnt1, cnt2); 4623 cselw(cnt2, cnt1, cnt2, Assembler::LE); // min 4624 4625 // A very short string 4626 cmpw(cnt2, isLL ? 8:4); 4627 br(Assembler::LT, SHORT_STRING); 4628 4629 // Check if the strings start at the same location. 4630 cmp(str1, str2); 4631 br(Assembler::EQ, LENGTH_DIFF); 4632 4633 // Compare longwords 4634 { 4635 subw(cnt2, cnt2, isLL ? 8:4); // The last longword is a special case 4636 4637 // Move both string pointers to the last longword of their 4638 // strings, negate the remaining count, and convert it to bytes.
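// In C terms, the word loop below is roughly the following (LL case
// shown; names are illustrative only):
//
//   str1 += min_len; str2 += min_len;      // point just past the data
//   long i = -(long)min_len;               // negative byte offset
//   do {
//     int64_t w1 = *(int64_t *)(str1 + i);
//     int64_t w2 = *(int64_t *)(str2 + i);
//     if (w1 != w2) goto difference;
//     i += 8;
//   } while (i < 0);
//
// so the induction variable counts up towards zero and the final,
// possibly overlapping longword is handled separately after the loop.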
4639 lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift))); 4640 lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift))); 4641 if (isLU || isUL) { 4642 sub(cnt1, zr, cnt2, LSL, str1_chr_shift); 4643 eor(vtmpZ, T16B, vtmpZ, vtmpZ); 4644 } 4645 sub(cnt2, zr, cnt2, LSL, str2_chr_shift); 4646 4647 // Loop, loading longwords and comparing them into rscratch2. 4648 bind(NEXT_WORD); 4649 if (isLU) { 4650 ldrs(vtmp, Address(str1, cnt1)); 4651 zip1(vtmp, T8B, vtmp, vtmpZ); 4652 umov(result, vtmp, D, 0); 4653 } else { 4654 ldr(result, Address(str1, isUL ? cnt1:cnt2)); 4655 } 4656 if (isUL) { 4657 ldrs(vtmp, Address(str2, cnt2)); 4658 zip1(vtmp, T8B, vtmp, vtmpZ); 4659 umov(rscratch1, vtmp, D, 0); 4660 } else { 4661 ldr(rscratch1, Address(str2, cnt2)); 4662 } 4663 adds(cnt2, cnt2, isUL ? 4:8); 4664 if (isLU || isUL) add(cnt1, cnt1, isLU ? 4:8); 4665 eor(rscratch2, result, rscratch1); 4666 cbnz(rscratch2, DIFFERENCE); 4667 br(Assembler::LT, NEXT_WORD); 4668 4669 // Last longword. In the case where length == 4 we compare the 4670 // same longword twice, but that's still faster than another 4671 // conditional branch. 4672 4673 if (isLU) { 4674 ldrs(vtmp, Address(str1)); 4675 zip1(vtmp, T8B, vtmp, vtmpZ); 4676 umov(result, vtmp, D, 0); 4677 } else { 4678 ldr(result, Address(str1)); 4679 } 4680 if (isUL) { 4681 ldrs(vtmp, Address(str2)); 4682 zip1(vtmp, T8B, vtmp, vtmpZ); 4683 umov(rscratch1, vtmp, D, 0); 4684 } else { 4685 ldr(rscratch1, Address(str2)); 4686 } 4687 eor(rscratch2, result, rscratch1); 4688 cbz(rscratch2, LENGTH_DIFF); 4689 4690 // Find the first different characters in the longwords and 4691 // compute their difference. 4692 bind(DIFFERENCE); 4693 rev(rscratch2, rscratch2); 4694 clz(rscratch2, rscratch2); 4695 andr(rscratch2, rscratch2, isLL ? -8 : -16); 4696 lsrv(result, result, rscratch2); 4697 (this->*ext_chr)(result, result); 4698 lsrv(rscratch1, rscratch1, rscratch2); 4699 (this->*ext_chr)(rscratch1, rscratch1); 4700 subw(result, result, rscratch1); 4701 b(DONE); 4702 } 4703 4704 bind(SHORT_STRING); 4705 // Is the minimum length zero? 4706 cbz(cnt2, LENGTH_DIFF); 4707 4708 bind(SHORT_LOOP); 4709 (this->*str1_load_chr)(result, Address(post(str1, str1_chr_size))); 4710 (this->*str2_load_chr)(cnt1, Address(post(str2, str2_chr_size))); 4711 subw(result, result, cnt1); 4712 cbnz(result, DONE); 4713 sub(cnt2, cnt2, 1); 4714 cbnz(cnt2, SHORT_LOOP); 4715 4716 // Strings are equal up to min length. Return the length difference. 4717 bind(LENGTH_DIFF); 4718 mov(result, tmp1); 4719 4720 // That's it 4721 bind(DONE); 4722 4723 BLOCK_COMMENT("} string_compare"); 4724 } 4725 4726 // Compare Strings or char/byte arrays. 4727 4728 // is_string is true iff this is a string comparison. 4729 4730 // For Strings we're passed the address of the first characters in a1 4731 // and a2 and the length in cnt1. 4732 4733 // For byte and char arrays we're passed the arrays themselves and we 4734 // have to extract length fields and do null checks here. 4735 4736 // elem_size is the element size in bytes: either 1 or 2. 4737 4738 // There are two implementations. For arrays >= 8 bytes, all 4739 // comparisons (including the final one, which may overlap) are 4740 // performed 8 bytes at a time. For arrays < 8 bytes, we compare a 4741 // halfword, then a short, and then a byte. 
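// For example, two 10-byte arrays are compared as bytes [0..7] and then
// bytes [2..9]: the final 8-byte load deliberately overlaps the first,
// which is harmless and cheaper than a variable-length tail loop.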
4742 4743 void MacroAssembler::arrays_equals(Register a1, Register a2, 4744 Register result, Register cnt1, 4745 int elem_size, bool is_string) 4746 { 4747 Label SAME, DONE, SHORT, NEXT_WORD, ONE; 4748 Register tmp1 = rscratch1; 4749 Register tmp2 = rscratch2; 4750 Register cnt2 = tmp2; // cnt2 only used in array length compare 4751 int elem_per_word = wordSize/elem_size; 4752 int log_elem_size = exact_log2(elem_size); 4753 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4754 int base_offset 4755 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 4756 4757 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 4758 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 4759 4760 #ifndef PRODUCT 4761 { 4762 const char kind = (elem_size == 2) ? 'U' : 'L'; 4763 char comment[64]; 4764 snprintf(comment, sizeof comment, "%s%c {", 4765 is_string ? "string_equals" : "array_equals", 4766 kind); 4767 BLOCK_COMMENT(comment); 4768 } 4769 #endif 4770 4771 mov(result, false); 4772 4773 if (!is_string) { 4774 // if (a1 == a2) 4775 // return true; 4776 eor(rscratch1, a1, a2); 4777 cbz(rscratch1, SAME); 4778 // if (a1 == null || a2 == null) 4779 // return false; 4780 cbz(a1, DONE); 4781 cbz(a2, DONE); 4782 // if (a1.length != a2.length) 4783 // return false; 4784 ldrw(cnt1, Address(a1, length_offset)); 4785 ldrw(cnt2, Address(a2, length_offset)); 4786 eorw(tmp1, cnt1, cnt2); 4787 cbnzw(tmp1, DONE); 4788 4789 lea(a1, Address(a1, base_offset)); 4790 lea(a2, Address(a2, base_offset)); 4791 } 4792 4793 // Check for short strings, i.e. smaller than wordSize. 4794 subs(cnt1, cnt1, elem_per_word); 4795 br(Assembler::LT, SHORT); 4796 // Main 8 byte comparison loop. 4797 bind(NEXT_WORD); { 4798 ldr(tmp1, Address(post(a1, wordSize))); 4799 ldr(tmp2, Address(post(a2, wordSize))); 4800 subs(cnt1, cnt1, elem_per_word); 4801 eor(tmp1, tmp1, tmp2); 4802 cbnz(tmp1, DONE); 4803 } br(GT, NEXT_WORD); 4804 // Last longword. In the case where length == 4 we compare the 4805 // same longword twice, but that's still faster than another 4806 // conditional branch. 4807 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 4808 // length == 4. 4809 if (log_elem_size > 0) 4810 lsl(cnt1, cnt1, log_elem_size); 4811 ldr(tmp1, Address(a1, cnt1)); 4812 ldr(tmp2, Address(a2, cnt1)); 4813 eor(tmp1, tmp1, tmp2); 4814 cbnz(tmp1, DONE); 4815 b(SAME); 4816 4817 bind(SHORT); 4818 Label TAIL03, TAIL01; 4819 4820 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 4821 { 4822 ldrw(tmp1, Address(post(a1, 4))); 4823 ldrw(tmp2, Address(post(a2, 4))); 4824 eorw(tmp1, tmp1, tmp2); 4825 cbnzw(tmp1, DONE); 4826 } 4827 bind(TAIL03); 4828 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 4829 { 4830 ldrh(tmp1, Address(post(a1, 2))); 4831 ldrh(tmp2, Address(post(a2, 2))); 4832 eorw(tmp1, tmp1, tmp2); 4833 cbnzw(tmp1, DONE); 4834 } 4835 bind(TAIL01); 4836 if (elem_size == 1) { // Only needed when comparing byte arrays. 4837 tbz(cnt1, 0, SAME); // 0-1 bytes left. 4838 { 4839 ldrb(tmp1, a1); 4840 ldrb(tmp2, a2); 4841 eorw(tmp1, tmp1, tmp2); 4842 cbnzw(tmp1, DONE); 4843 } 4844 } 4845 // Arrays are equal. 4846 bind(SAME); 4847 mov(result, true); 4848 4849 // That's it. 4850 bind(DONE); 4851 BLOCK_COMMENT(is_string ? "} string_equals" : "} array_equals"); 4852 } 4853 4854 4855 // base: Address of a buffer to be zeroed, 8 bytes aligned. 4856 // cnt: Count in HeapWords. 4857 // May defer to block_zero (below), which takes an is_large hint, when UseBlockZeroing is set.
4858 void MacroAssembler::zero_words(Register base, Register cnt) 4859 { 4860 if (UseBlockZeroing) { 4861 block_zero(base, cnt); 4862 } else { 4863 fill_words(base, cnt, zr); 4864 } 4865 } 4866 4867 // r10 = base: Address of a buffer to be zeroed, 8 bytes aligned. 4868 // cnt: Immediate count in HeapWords. 4869 // r11 = tmp: For use as cnt if we need to call out 4870 #define ShortArraySize (18 * BytesPerLong) 4871 void MacroAssembler::zero_words(Register base, u_int64_t cnt) 4872 { 4873 Register tmp = r11; 4874 int i = cnt & 1; // store any odd word to start 4875 if (i) str(zr, Address(base)); 4876 4877 if (cnt <= ShortArraySize / BytesPerLong) { 4878 for (; i < (int)cnt; i += 2) 4879 stp(zr, zr, Address(base, i * wordSize)); 4880 } else if (UseBlockZeroing && cnt >= (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)) { 4881 mov(tmp, cnt); 4882 block_zero(base, tmp, true); 4883 } else { 4884 const int unroll = 4; // Number of stp(zr, zr) instructions we'll unroll 4885 int remainder = cnt % (2 * unroll); 4886 for (; i < remainder; i += 2) 4887 stp(zr, zr, Address(base, i * wordSize)); 4888 4889 Label loop; 4890 Register cnt_reg = rscratch1; 4891 Register loop_base = rscratch2; 4892 cnt = cnt - remainder; 4893 mov(cnt_reg, cnt); 4894 // adjust base and prebias by -2 * wordSize so we can pre-increment 4895 add(loop_base, base, (remainder - 2) * wordSize); 4896 bind(loop); 4897 sub(cnt_reg, cnt_reg, 2 * unroll); 4898 for (i = 1; i < unroll; i++) 4899 stp(zr, zr, Address(loop_base, 2 * i * wordSize)); 4900 stp(zr, zr, Address(pre(loop_base, 2 * unroll * wordSize))); 4901 cbnz(cnt_reg, loop); 4902 } 4903 } 4904 4905 // base: Address of a buffer to be filled, 8 bytes aligned. 4906 // cnt: Count in 8-byte unit. 4907 // value: Value to be filled with. 4908 // base will point to the end of the buffer after filling. 4909 void MacroAssembler::fill_words(Register base, Register cnt, Register value) 4910 { 4911 // Algorithm: 4912 // 4913 // scratch1 = cnt & 7; 4914 // cnt -= scratch1; 4915 // p += scratch1; 4916 // switch (scratch1) { 4917 // do { 4918 // cnt -= 8; 4919 // p[-8] = v; 4920 // case 7: 4921 // p[-7] = v; 4922 // case 6: 4923 // p[-6] = v; 4924 // // ... 4925 // case 1: 4926 // p[-1] = v; 4927 // case 0: 4928 // p += 8; 4929 // } while (cnt); 4930 // } 4931 4932 assert_different_registers(base, cnt, value, rscratch1, rscratch2); 4933 4934 Label fini, skip, entry, loop; 4935 const int unroll = 8; // Number of stp instructions we'll unroll 4936 4937 cbz(cnt, fini); 4938 tbz(base, 3, skip); 4939 str(value, Address(post(base, 8))); 4940 sub(cnt, cnt, 1); 4941 bind(skip); 4942 4943 andr(rscratch1, cnt, (unroll-1) * 2); 4944 sub(cnt, cnt, rscratch1); 4945 add(base, base, rscratch1, Assembler::LSL, 3); 4946 adr(rscratch2, entry); 4947 sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1); 4948 br(rscratch2); 4949 4950 bind(loop); 4951 add(base, base, unroll * 16); 4952 for (int i = -unroll; i < 0; i++) 4953 stp(value, value, Address(base, i * 16)); 4954 bind(entry); 4955 subs(cnt, cnt, unroll * 2); 4956 br(Assembler::GE, loop); 4957 4958 tbz(cnt, 0, fini); 4959 str(value, Address(post(base, 8))); 4960 bind(fini); 4961 } 4962 4963 // Use DC ZVA to do fast zeroing. 4964 // base: Address of a buffer to be zeroed, 8 bytes aligned. 4965 // cnt: Count in HeapWords. 4966 // is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit. 
4967 void MacroAssembler::block_zero(Register base, Register cnt, bool is_large) 4968 { 4969 Label small; 4970 Label store_pair, loop_store_pair, done; 4971 Label base_aligned; 4972 4973 assert_different_registers(base, cnt, rscratch1); 4974 guarantee(base == r10 && cnt == r11, "fix register usage"); 4975 4976 Register tmp = rscratch1; 4977 Register tmp2 = rscratch2; 4978 int zva_length = VM_Version::zva_length(); 4979 4980 // Ensure ZVA length can be divided by 16. This is required by 4981 // the subsequent operations. 4982 assert (zva_length % 16 == 0, "Unexpected ZVA Length"); 4983 4984 if (!is_large) cbz(cnt, done); 4985 tbz(base, 3, base_aligned); 4986 str(zr, Address(post(base, 8))); 4987 sub(cnt, cnt, 1); 4988 bind(base_aligned); 4989 4990 // Ensure count >= zva_length * 2 so that it still deserves a zva after 4991 // alignment. 4992 if (!is_large || !(BlockZeroingLowLimit >= zva_length * 2)) { 4993 int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit); 4994 subs(tmp, cnt, low_limit >> 3); 4995 br(Assembler::LT, small); 4996 } 4997 4998 far_call(StubRoutines::aarch64::get_zero_longs()); 4999 5000 bind(small); 5001 5002 const int unroll = 8; // Number of stp instructions we'll unroll 5003 Label small_loop, small_table_end; 5004 5005 andr(tmp, cnt, (unroll-1) * 2); 5006 sub(cnt, cnt, tmp); 5007 add(base, base, tmp, Assembler::LSL, 3); 5008 adr(tmp2, small_table_end); 5009 sub(tmp2, tmp2, tmp, Assembler::LSL, 1); 5010 br(tmp2); 5011 5012 bind(small_loop); 5013 add(base, base, unroll * 16); 5014 for (int i = -unroll; i < 0; i++) 5015 stp(zr, zr, Address(base, i * 16)); 5016 bind(small_table_end); 5017 subs(cnt, cnt, unroll * 2); 5018 br(Assembler::GE, small_loop); 5019 5020 tbz(cnt, 0, done); 5021 str(zr, Address(post(base, 8))); 5022 5023 bind(done); 5024 } 5025 5026 // Intrinsic for sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray and 5027 // java/lang/StringUTF16.compress. 5028 void MacroAssembler::encode_iso_array(Register src, Register dst, 5029 Register len, Register result, 5030 FloatRegister Vtmp1, FloatRegister Vtmp2, 5031 FloatRegister Vtmp3, FloatRegister Vtmp4) 5032 { 5033 Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1; 5034 Register tmp1 = rscratch1; 5035 5036 mov(result, len); // Save initial len 5037 5038 #ifndef BUILTIN_SIM 5039 subs(len, len, 32); 5040 br(LT, LOOP_8); 5041 5042 // The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions 5043 // to convert chars to bytes. These set the 'QC' bit in the FPSR if 5044 // any char could not fit in a byte, so clear the FPSR so we can test it. 
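// For example, the char 0x0142 does not fit in a byte: uqxtn saturates
// it to 0xff and sets FPSR.QC, so the 32-char block is abandoned and we
// retry with smaller blocks, ending in the scalar loop that re-checks
// each char with tst(tmp1, 0xff00) and stops at the offender.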
5045 clear_fpsr(); 5046 5047 BIND(NEXT_32); 5048 ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src); 5049 uqxtn(Vtmp1, T8B, Vtmp1, T8H); // uqxtn - write bottom half 5050 uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half 5051 uqxtn(Vtmp2, T8B, Vtmp3, T8H); 5052 uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2 5053 get_fpsr(tmp1); 5054 cbnzw(tmp1, LOOP_8); 5055 st1(Vtmp1, Vtmp2, T16B, post(dst, 32)); 5056 subs(len, len, 32); 5057 add(src, src, 64); 5058 br(GE, NEXT_32); 5059 5060 BIND(LOOP_8); 5061 adds(len, len, 32-8); 5062 br(LT, LOOP_1); 5063 clear_fpsr(); // QC may be set from loop above, clear again 5064 BIND(NEXT_8); 5065 ld1(Vtmp1, T8H, src); 5066 uqxtn(Vtmp1, T8B, Vtmp1, T8H); 5067 get_fpsr(tmp1); 5068 cbnzw(tmp1, LOOP_1); 5069 st1(Vtmp1, T8B, post(dst, 8)); 5070 subs(len, len, 8); 5071 add(src, src, 16); 5072 br(GE, NEXT_8); 5073 5074 BIND(LOOP_1); 5075 adds(len, len, 8); 5076 br(LE, DONE); 5077 #else 5078 cbz(len, DONE); 5079 #endif 5080 BIND(NEXT_1); 5081 ldrh(tmp1, Address(post(src, 2))); 5082 tst(tmp1, 0xff00); 5083 br(NE, DONE); 5084 strb(tmp1, Address(post(dst, 1))); 5085 subs(len, len, 1); 5086 br(GT, NEXT_1); 5087 5088 BIND(DONE); 5089 sub(result, result, len); // Return index where we stopped 5090 // Return len == 0 if we processed all 5091 // characters 5092 } 5093 5094 5095 // Inflate byte[] array to char[]. 5096 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 5097 FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3, 5098 Register tmp4) { 5099 Label big, done; 5100 5101 assert_different_registers(src, dst, len, tmp4, rscratch1); 5102 5103 fmovd(vtmp1 , zr); 5104 lsrw(rscratch1, len, 3); 5105 5106 cbnzw(rscratch1, big); 5107 5108 // Short string: less than 8 bytes. 5109 { 5110 Label loop, around, tiny; 5111 5112 subsw(len, len, 4); 5113 andw(len, len, 3); 5114 br(LO, tiny); 5115 5116 // Use SIMD to do 4 bytes. 5117 ldrs(vtmp2, post(src, 4)); 5118 zip1(vtmp3, T8B, vtmp2, vtmp1); 5119 strd(vtmp3, post(dst, 8)); 5120 5121 cbzw(len, done); 5122 5123 // Do the remaining bytes by steam. 5124 bind(loop); 5125 ldrb(tmp4, post(src, 1)); 5126 strh(tmp4, post(dst, 2)); 5127 subw(len, len, 1); 5128 5129 bind(tiny); 5130 cbnz(len, loop); 5131 5132 bind(around); 5133 b(done); 5134 } 5135 5136 // Unpack the bytes 8 at a time. 5137 bind(big); 5138 andw(len, len, 7); 5139 5140 { 5141 Label loop, around; 5142 5143 bind(loop); 5144 ldrd(vtmp2, post(src, 8)); 5145 sub(rscratch1, rscratch1, 1); 5146 zip1(vtmp3, T16B, vtmp2, vtmp1); 5147 st1(vtmp3, T8H, post(dst, 16)); 5148 cbnz(rscratch1, loop); 5149 5150 bind(around); 5151 } 5152 5153 // Do the tail of up to 8 bytes. 5154 sub(src, src, 8); 5155 add(src, src, len, ext::uxtw, 0); 5156 ldrd(vtmp2, Address(src)); 5157 sub(dst, dst, 16); 5158 add(dst, dst, len, ext::uxtw, 1); 5159 zip1(vtmp3, T16B, vtmp2, vtmp1); 5160 st1(vtmp3, T8H, Address(dst)); 5161 5162 bind(done); 5163 } 5164 5165 // Compress char[] array to byte[]. 
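// Result protocol (as used below): encode_iso_array returns the number
// of chars actually encoded and leaves len == 0 only when every char
// fit, so the csel yields that count on full success and 0 when
// compression failed part-way, which is what the caller expects.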

// Compress char[] array to byte[].
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         FloatRegister tmp1Reg, FloatRegister tmp2Reg,
                                         FloatRegister tmp3Reg, FloatRegister tmp4Reg,
                                         Register result) {
  encode_iso_array(src, dst, len, result,
                   tmp1Reg, tmp2Reg, tmp3Reg, tmp4Reg);
  cmp(len, zr);
  csel(result, result, zr, EQ);
}

// get_thread() can be called anywhere inside generated code, so we
// need to save whatever non-callee-saved context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// by the call setup code.
//
// aarch64_get_thread_helper() clobbers only r0, r1, and flags.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blrt(lr, 1, 0, 1);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
}

// Shenandoah requires that all objects are evacuated before being
// written to, and that fromspace pointers are not written into
// objects during concurrent marking. These methods check for that.

const bool ShenandoahStoreCheck = false;

void MacroAssembler::in_heap_check(Register r, Label &nope) {
  ShenandoahHeap *h = (ShenandoahHeap *)Universe::heap();

  HeapWord* first_region_bottom = h->first_region_bottom();
  HeapWord* last_region_end = first_region_bottom +
    (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * h->max_regions();

  mov(rscratch1, (uintptr_t)first_region_bottom);
  cmp(r, rscratch1);
  br(Assembler::LO, nope);
  mov(rscratch1, (uintptr_t)last_region_end);
  cmp(r, rscratch1);
  br(Assembler::HS, nope);
}

void MacroAssembler::shenandoah_store_check(Register r, Address dest) {
  if (! ShenandoahStoreCheck)
    return;

  assert_different_registers(rscratch1, rscratch2, r);
  assert(! dest.uses(rscratch1), "invalid register");
  assert(! dest.uses(rscratch2), "invalid register");

  assert(! InlineObjectCopy, "ShenandoahStoreCheck is incompatible with InlineObjectCopy");

  Label done;
  cbz(r, done);

  mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr());
  Assembler::ldrw(rscratch2, Address(rscratch2));
  cbzw(rscratch2, done);

  in_heap_check(r, done);

  // Check for object in collection set.
  lsr(rscratch1, r, ShenandoahHeapRegion::RegionSizeShift);
  mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  ldrb(rscratch2, Address(rscratch2, rscratch1));
  tbz(rscratch2, 0, done);

  // Check for dest in heap
  lea(rscratch2, dest);
  in_heap_check(rscratch2, done);

  // Check for dest in collection set.
  lsr(rscratch1, rscratch2, ShenandoahHeapRegion::RegionSizeShift);
  mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  ldrb(rscratch2, Address(rscratch2, rscratch1));
  tbz(rscratch2, 0, done);

  // Load the Brooks pointer of the offending oop; it is left in
  // rscratch2 for inspection when we stop below.
  ldr(rscratch2, Address(r, BrooksPointer::byte_offset()));

  stop("Shenandoah: store of oop in collection set during marking!", &done);
  should_not_reach_here();

  bind(done);
}
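
// For reference, each in_cset_fast_test lookup above reduces to a single
// byte load per candidate address, roughly (illustrative register names):
//
//   lsr  x8, x0, #RegionSizeShift      // region index of the address
//   mov  x9, <in_cset_fast_test base>  // table: one byte per heap region
//   ldrb w9, [x9, x8]
//   tbz  w9, #0, done                  // bit 0 set => region is in cset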

void MacroAssembler::shenandoah_store_check(Address dest) {
  if (! ShenandoahStoreCheck)
    return;

  assert(! dest.uses(rscratch1), "invalid register");
  assert(! dest.uses(rscratch2), "invalid register");

  Label done, yes;

  ldr(rscratch2, Address(rthread, in_bytes(JavaThread::evacuation_in_progress_offset())));
  cbnzw(rscratch2, yes);

  mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr());
  Assembler::ldrw(rscratch2, Address(rscratch2));
  cbzw(rscratch2, done);

  bind(yes);

  // Check for dest in heap
  lea(rscratch2, dest);
  cbz(rscratch2, done);
  in_heap_check(rscratch2, done);

  lsr(rscratch1, rscratch2, ShenandoahHeapRegion::RegionSizeShift);
  mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  ldrb(rscratch2, Address(rscratch2, rscratch1));
  tbz(rscratch2, 0, done);

  stop("Shenandoah: store in collection set during marking/evacuation!", &done);
  should_not_reach_here();

  bind(done);
}

void MacroAssembler::shenandoah_store_check(Register dest) {
  if (! ShenandoahStoreCheck)
    return;

  assert_different_registers(rscratch1, rscratch2, dest);

  Label done, yes;

  ldr(rscratch2, Address(rthread, in_bytes(JavaThread::evacuation_in_progress_offset())));
  cbnzw(rscratch2, yes);

  mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr());
  Assembler::ldrw(rscratch2, Address(rscratch2));
  cbzw(rscratch2, done);

  bind(yes);

  // Check for dest in heap
  cbz(dest, done);
  in_heap_check(dest, done);

  lsr(rscratch1, dest, ShenandoahHeapRegion::RegionSizeShift);
  mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  ldrb(rscratch2, Address(rscratch2, rscratch1));
  tbz(rscratch2, 0, done);

  stop("Shenandoah: store in collection set during marking/evacuation!", &done);
  should_not_reach_here();

  bind(done);
}
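
// A verifying store site would typically emit one of the overloads above
// immediately before the store it guards, e.g. (sketch only; 'val' and
// 'obj' are placeholder registers, not names defined here):
//
//   shenandoah_store_check(val, Address(obj, offset));
//   str(val, Address(obj, offset));
//
// Note that with ShenandoahStoreCheck hard-wired to false, all three
// overloads emit no code at all.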