1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include <sys/types.h> 27 28 #include "precompiled.hpp" 29 #include "asm/assembler.hpp" 30 #include "asm/assembler.inline.hpp" 31 #include "interpreter/interpreter.hpp" 32 33 #include "compiler/disassembler.hpp" 34 #include "memory/resourceArea.hpp" 35 #include "nativeInst_aarch64.hpp" 36 #include "opto/compile.hpp" 37 #include "opto/node.hpp" 38 #include "runtime/biasedLocking.hpp" 39 #include "runtime/icache.hpp" 40 #include "runtime/interfaceSupport.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 43 #if INCLUDE_ALL_GCS 44 #include "gc/g1/g1CollectedHeap.inline.hpp" 45 #include "gc/g1/g1SATBCardTableModRefBS.hpp" 46 #include "gc/g1/heapRegion.hpp" 47 #endif 48 49 #ifdef PRODUCT 50 #define BLOCK_COMMENT(str) /* nothing */ 51 #define STOP(error) stop(error) 52 #else 53 #define BLOCK_COMMENT(str) block_comment(str) 54 #define STOP(error) block_comment(error); stop(error) 55 #endif 56 57 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 58 59 // Patch any kind of instruction; there may be several instructions. 60 // Return the total length (in bytes) of the instructions. 61 int MacroAssembler::pd_patch_instruction_size(address branch, address target) { 62 int instructions = 1; 63 assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant"); 64 long offset = (target - branch) >> 2; 65 unsigned insn = *(unsigned*)branch; 66 if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) { 67 // Load register (literal) 68 Instruction_aarch64::spatch(branch, 23, 5, offset); 69 } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) { 70 // Unconditional branch (immediate) 71 Instruction_aarch64::spatch(branch, 25, 0, offset); 72 } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) { 73 // Conditional branch (immediate) 74 Instruction_aarch64::spatch(branch, 23, 5, offset); 75 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) { 76 // Compare & branch (immediate) 77 Instruction_aarch64::spatch(branch, 23, 5, offset); 78 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) { 79 // Test & branch (immediate) 80 Instruction_aarch64::spatch(branch, 18, 5, offset); 81 } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) { 82 // PC-rel. 
addressing 83 offset = target-branch; 84 int shift = Instruction_aarch64::extract(insn, 31, 31); 85 if (shift) { 86 u_int64_t dest = (u_int64_t)target; 87 uint64_t pc_page = (uint64_t)branch >> 12; 88 uint64_t adr_page = (uint64_t)target >> 12; 89 unsigned offset_lo = dest & 0xfff; 90 offset = adr_page - pc_page; 91 92 // We handle 3 types of PC relative addressing 93 // 1 - adrp Rx, target_page 94 // ldr/str Ry, [Rx, #offset_in_page] 95 // 2 - adrp Rx, target_page 96 // add Ry, Rx, #offset_in_page 97 // 3 - adrp Rx, target_page (page aligned reloc, offset == 0) 98 // In the first 2 cases we must check that Rx is the same in the adrp and the 99 // subsequent ldr/str or add instruction. Otherwise we could accidentally end 100 // up treating a type 3 relocation as a type 1 or 2 just because it happened 101 // to be followed by a random unrelated ldr/str or add instruction. 102 // 103 // In the case of a type 3 relocation, we know that these are only generated 104 // for the safepoint polling page, or for the card type byte map base so we 105 // assert as much and of course that the offset is 0. 106 // 107 unsigned insn2 = ((unsigned*)branch)[1]; 108 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 && 109 Instruction_aarch64::extract(insn, 4, 0) == 110 Instruction_aarch64::extract(insn2, 9, 5)) { 111 // Load/store register (unsigned immediate) 112 unsigned size = Instruction_aarch64::extract(insn2, 31, 30); 113 Instruction_aarch64::patch(branch + sizeof (unsigned), 114 21, 10, offset_lo >> size); 115 guarantee(((dest >> size) << size) == dest, "misaligned target"); 116 instructions = 2; 117 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 && 118 Instruction_aarch64::extract(insn, 4, 0) == 119 Instruction_aarch64::extract(insn2, 4, 0)) { 120 // add (immediate) 121 Instruction_aarch64::patch(branch + sizeof (unsigned), 122 21, 10, offset_lo); 123 instructions = 2; 124 } else { 125 assert((jbyte *)target == 126 ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base || 127 target == StubRoutines::crc_table_addr() || 128 (address)target == os::get_polling_page(), 129 "adrp must be polling page or byte map base"); 130 assert(offset_lo == 0, "offset must be 0 for polling page or byte map base"); 131 } 132 } 133 int offset_lo = offset & 3; 134 offset >>= 2; 135 Instruction_aarch64::spatch(branch, 23, 5, offset); 136 Instruction_aarch64::patch(branch, 30, 29, offset_lo); 137 } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) { 138 u_int64_t dest = (u_int64_t)target; 139 // Move wide constant 140 assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch"); 141 assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch"); 142 Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff); 143 Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff); 144 Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff); 145 assert(target_addr_for_insn(branch) == target, "should be"); 146 instructions = 3; 147 } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 && 148 Instruction_aarch64::extract(insn, 4, 0) == 0b11111) { 149 // nothing to do 150 assert(target == 0, "did not expect to relocate target for polling page load"); 151 } else { 152 ShouldNotReachHere(); 153 } 154 return instructions * NativeInstruction::instruction_size; 155 } 156 157 int MacroAssembler::patch_oop(address insn_addr, address o) { 158 int instructions; 159 unsigned insn = *(unsigned*)insn_addr; 160 
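  // Worked example of the patching below: a narrow (32-bit) oop such as
  // 0x12345678 puts 0x1234 into the first instruction's 16-bit immediate
  // field and 0x5678 into the following movk; a wide (48-bit) oop is split
  // into three 16-bit chunks, patched low halfword first.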
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only.  The only cases where this is generated are
      // the safepoint polling page and the card table byte map base, so
      // we assert as much.
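      //
      // Worked example of the page arithmetic: an adrp at PC
      // 0x0000_7fff_0000_1040 encoding an immediate of +3 pages gives
      // target_page = (0x7fff_0000_1040 + 0x3000) & ~0xfffUL = 0x7fff_0000_4000;
      // a following 64-bit ldr with unsigned immediate 0x10 (size == 3)
      // contributes 0x10 << 3 == 0x80, for a final target of 0x7fff_0000_4080.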
223 // 224 unsigned insn2 = ((unsigned*)insn_addr)[1]; 225 if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 && 226 Instruction_aarch64::extract(insn, 4, 0) == 227 Instruction_aarch64::extract(insn2, 9, 5)) { 228 // Load/store register (unsigned immediate) 229 unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10); 230 unsigned int size = Instruction_aarch64::extract(insn2, 31, 30); 231 return address(target_page + (byte_offset << size)); 232 } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 && 233 Instruction_aarch64::extract(insn, 4, 0) == 234 Instruction_aarch64::extract(insn2, 4, 0)) { 235 // add (immediate) 236 unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10); 237 return address(target_page + byte_offset); 238 } else { 239 assert((jbyte *)target_page == 240 ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base || 241 (address)target_page == os::get_polling_page(), 242 "adrp must be polling page or byte map base"); 243 return (address)target_page; 244 } 245 } else { 246 ShouldNotReachHere(); 247 } 248 } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) { 249 u_int32_t *insns = (u_int32_t *)insn_addr; 250 // Move wide constant: movz, movk, movk. See movptr(). 251 assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch"); 252 assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch"); 253 return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5)) 254 + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16) 255 + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32)); 256 } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 && 257 Instruction_aarch64::extract(insn, 4, 0) == 0b11111) { 258 return 0; 259 } else { 260 ShouldNotReachHere(); 261 } 262 return address(((uint64_t)insn_addr + (offset << 2))); 263 } 264 265 void MacroAssembler::serialize_memory(Register thread, Register tmp) { 266 dsb(Assembler::SY); 267 } 268 269 270 void MacroAssembler::reset_last_Java_frame(bool clear_fp, 271 bool clear_pc) { 272 // we must set sp to zero to clear frame 273 str(zr, Address(rthread, JavaThread::last_Java_sp_offset())); 274 // must clear fp, so that compiled frames are not confused; it is 275 // possible that we need it only for debugging 276 if (clear_fp) { 277 str(zr, Address(rthread, JavaThread::last_Java_fp_offset())); 278 } 279 280 if (clear_pc) { 281 str(zr, Address(rthread, JavaThread::last_Java_pc_offset())); 282 } 283 } 284 285 // Calls to C land 286 // 287 // When entering C land, the rfp, & resp of the last Java frame have to be recorded 288 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 289 // has to be reset to 0. This is required to allow proper stack traversal. 
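//
// A typical pairing, as used by call_VM_base below (sketch):
//
//   Label retaddr;
//   set_last_Java_frame(last_java_sp, rfp, retaddr, rscratch1);
//   ... call into the VM; retaddr is bound at the return point ...
//   reset_last_Java_frame(true, true);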
290 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 291 Register last_java_fp, 292 Register last_java_pc, 293 Register scratch) { 294 295 if (last_java_pc->is_valid()) { 296 str(last_java_pc, Address(rthread, 297 JavaThread::frame_anchor_offset() 298 + JavaFrameAnchor::last_Java_pc_offset())); 299 } 300 301 // determine last_java_sp register 302 if (last_java_sp == sp) { 303 mov(scratch, sp); 304 last_java_sp = scratch; 305 } else if (!last_java_sp->is_valid()) { 306 last_java_sp = esp; 307 } 308 309 str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset())); 310 311 // last_java_fp is optional 312 if (last_java_fp->is_valid()) { 313 str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset())); 314 } 315 } 316 317 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 318 Register last_java_fp, 319 address last_java_pc, 320 Register scratch) { 321 if (last_java_pc != NULL) { 322 adr(scratch, last_java_pc); 323 } else { 324 // FIXME: This is almost never correct. We should delete all 325 // cases of set_last_Java_frame with last_java_pc=NULL and use the 326 // correct return address instead. 327 adr(scratch, pc()); 328 } 329 330 str(scratch, Address(rthread, 331 JavaThread::frame_anchor_offset() 332 + JavaFrameAnchor::last_Java_pc_offset())); 333 334 set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch); 335 } 336 337 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 338 Register last_java_fp, 339 Label &L, 340 Register scratch) { 341 if (L.is_bound()) { 342 set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch); 343 } else { 344 InstructionMark im(this); 345 L.add_patch_at(code(), locator()); 346 set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch); 347 } 348 } 349 350 void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) { 351 assert(ReservedCodeCacheSize < 4*G, "branch out of range"); 352 assert(CodeCache::find_blob(entry.target()) != NULL, 353 "destination of far call not found in code cache"); 354 if (far_branches()) { 355 unsigned long offset; 356 // We can use ADRP here because we know that the total size of 357 // the code cache cannot exceed 2Gb. 358 adrp(tmp, entry, offset); 359 add(tmp, tmp, offset); 360 if (cbuf) cbuf->set_insts_mark(); 361 blr(tmp); 362 } else { 363 if (cbuf) cbuf->set_insts_mark(); 364 bl(entry); 365 } 366 } 367 368 void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) { 369 assert(ReservedCodeCacheSize < 4*G, "branch out of range"); 370 assert(CodeCache::find_blob(entry.target()) != NULL, 371 "destination of far call not found in code cache"); 372 if (far_branches()) { 373 unsigned long offset; 374 // We can use ADRP here because we know that the total size of 375 // the code cache cannot exceed 2Gb. 
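    // (adrp reaches +/- 4GB from the current page, so together with the
    //  low-order add it can materialize any address inside the code cache.)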
376 adrp(tmp, entry, offset); 377 add(tmp, tmp, offset); 378 if (cbuf) cbuf->set_insts_mark(); 379 br(tmp); 380 } else { 381 if (cbuf) cbuf->set_insts_mark(); 382 b(entry); 383 } 384 } 385 386 int MacroAssembler::biased_locking_enter(Register lock_reg, 387 Register obj_reg, 388 Register swap_reg, 389 Register tmp_reg, 390 bool swap_reg_contains_mark, 391 Label& done, 392 Label* slow_case, 393 BiasedLockingCounters* counters) { 394 assert(UseBiasedLocking, "why call this otherwise?"); 395 assert_different_registers(lock_reg, obj_reg, swap_reg); 396 397 if (PrintBiasedLockingStatistics && counters == NULL) 398 counters = BiasedLocking::counters(); 399 400 bool need_tmp_reg = false; 401 if (tmp_reg == noreg) { 402 tmp_reg = rscratch2; 403 } 404 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1); 405 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 406 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); 407 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes()); 408 Address saved_mark_addr(lock_reg, 0); 409 410 // Biased locking 411 // See whether the lock is currently biased toward our thread and 412 // whether the epoch is still valid 413 // Note that the runtime guarantees sufficient alignment of JavaThread 414 // pointers to allow age to be placed into low bits 415 // First check to see whether biasing is even enabled for this object 416 Label cas_label; 417 int null_check_offset = -1; 418 if (!swap_reg_contains_mark) { 419 null_check_offset = offset(); 420 ldr(swap_reg, mark_addr); 421 } 422 andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place); 423 cmp(tmp_reg, markOopDesc::biased_lock_pattern); 424 br(Assembler::NE, cas_label); 425 // The bias pattern is present in the object's header. Need to check 426 // whether the bias owner and the epoch are both still current. 427 load_prototype_header(tmp_reg, obj_reg); 428 orr(tmp_reg, tmp_reg, rthread); 429 eor(tmp_reg, swap_reg, tmp_reg); 430 andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place)); 431 if (counters != NULL) { 432 Label around; 433 cbnz(tmp_reg, around); 434 atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1); 435 b(done); 436 bind(around); 437 } else { 438 cbz(tmp_reg, done); 439 } 440 441 Label try_revoke_bias; 442 Label try_rebias; 443 444 // At this point we know that the header has the bias pattern and 445 // that we are not the bias owner in the current epoch. We need to 446 // figure out more details about the state of the header in order to 447 // know what operations can be legally performed on the object's 448 // header. 449 450 // If the low three bits in the xor result aren't clear, that means 451 // the prototype header is no longer biased and we have to revoke 452 // the bias on this object. 453 andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place); 454 cbnz(rscratch1, try_revoke_bias); 455 456 // Biasing is still enabled for this data type. See whether the 457 // epoch of the current bias is still valid, meaning that the epoch 458 // bits of the mark word are equal to the epoch bits of the 459 // prototype header. (Note that the prototype header's epoch bits 460 // only change at a safepoint.) If not, attempt to rebias the object 461 // toward the current thread. 
Note that we must be absolutely sure 462 // that the current epoch is invalid in order to do this because 463 // otherwise the manipulations it performs on the mark word are 464 // illegal. 465 andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place); 466 cbnz(rscratch1, try_rebias); 467 468 // The epoch of the current bias is still valid but we know nothing 469 // about the owner; it might be set or it might be clear. Try to 470 // acquire the bias of the object using an atomic operation. If this 471 // fails we will go in to the runtime to revoke the object's bias. 472 // Note that we first construct the presumed unbiased header so we 473 // don't accidentally blow away another thread's valid bias. 474 { 475 Label here; 476 mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); 477 andr(swap_reg, swap_reg, rscratch1); 478 orr(tmp_reg, swap_reg, rthread); 479 cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case); 480 // If the biasing toward our thread failed, this means that 481 // another thread succeeded in biasing it toward itself and we 482 // need to revoke that bias. The revocation will occur in the 483 // interpreter runtime in the slow case. 484 bind(here); 485 if (counters != NULL) { 486 atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()), 487 tmp_reg, rscratch1); 488 } 489 } 490 b(done); 491 492 bind(try_rebias); 493 // At this point we know the epoch has expired, meaning that the 494 // current "bias owner", if any, is actually invalid. Under these 495 // circumstances _only_, we are allowed to use the current header's 496 // value as the comparison value when doing the cas to acquire the 497 // bias in the current epoch. In other words, we allow transfer of 498 // the bias from one thread to another directly in this situation. 499 // 500 // FIXME: due to a lack of registers we currently blow away the age 501 // bits in this situation. Should attempt to preserve them. 502 { 503 Label here; 504 load_prototype_header(tmp_reg, obj_reg); 505 orr(tmp_reg, rthread, tmp_reg); 506 cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case); 507 // If the biasing toward our thread failed, then another thread 508 // succeeded in biasing it toward itself and we need to revoke that 509 // bias. The revocation will occur in the runtime in the slow case. 510 bind(here); 511 if (counters != NULL) { 512 atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()), 513 tmp_reg, rscratch1); 514 } 515 } 516 b(done); 517 518 bind(try_revoke_bias); 519 // The prototype mark in the klass doesn't have the bias bit set any 520 // more, indicating that objects of this data type are not supposed 521 // to be biased any more. We are going to try to reset the mark of 522 // this object to the prototype value and fall through to the 523 // CAS-based locking scheme. Note that if our CAS fails, it means 524 // that another thread raced us for the privilege of revoking the 525 // bias of this particular object, so it's okay to continue in the 526 // normal locking code. 527 // 528 // FIXME: due to a lack of registers we currently blow away the age 529 // bits in this situation. Should attempt to preserve them. 
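  // The CAS below tries to install the unbiased prototype header: it expects
  // the mark word still held in swap_reg and, on success, replaces it with
  // tmp_reg (the prototype).  Either way the bias bit ends up clear.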
530 { 531 Label here, nope; 532 load_prototype_header(tmp_reg, obj_reg); 533 cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope); 534 bind(here); 535 536 // Fall through to the normal CAS-based lock, because no matter what 537 // the result of the above CAS, some thread must have succeeded in 538 // removing the bias bit from the object's header. 539 if (counters != NULL) { 540 atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg, 541 rscratch1); 542 } 543 bind(nope); 544 } 545 546 bind(cas_label); 547 548 return null_check_offset; 549 } 550 551 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { 552 assert(UseBiasedLocking, "why call this otherwise?"); 553 554 // Check for biased locking unlock case, which is a no-op 555 // Note: we do not have to check the thread ID for two reasons. 556 // First, the interpreter checks for IllegalMonitorStateException at 557 // a higher level. Second, if the bias was revoked while we held the 558 // lock, the object could not be rebiased toward another thread, so 559 // the bias bit would be clear. 560 ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); 561 andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); 562 cmp(temp_reg, markOopDesc::biased_lock_pattern); 563 br(Assembler::EQ, done); 564 } 565 566 567 // added to make this compile 568 569 REGISTER_DEFINITION(Register, noreg); 570 571 static void pass_arg0(MacroAssembler* masm, Register arg) { 572 if (c_rarg0 != arg ) { 573 masm->mov(c_rarg0, arg); 574 } 575 } 576 577 static void pass_arg1(MacroAssembler* masm, Register arg) { 578 if (c_rarg1 != arg ) { 579 masm->mov(c_rarg1, arg); 580 } 581 } 582 583 static void pass_arg2(MacroAssembler* masm, Register arg) { 584 if (c_rarg2 != arg ) { 585 masm->mov(c_rarg2, arg); 586 } 587 } 588 589 static void pass_arg3(MacroAssembler* masm, Register arg) { 590 if (c_rarg3 != arg ) { 591 masm->mov(c_rarg3, arg); 592 } 593 } 594 595 void MacroAssembler::call_VM_base(Register oop_result, 596 Register java_thread, 597 Register last_java_sp, 598 address entry_point, 599 int number_of_arguments, 600 bool check_exceptions) { 601 // determine java_thread register 602 if (!java_thread->is_valid()) { 603 java_thread = rthread; 604 } 605 606 // determine last_java_sp register 607 if (!last_java_sp->is_valid()) { 608 last_java_sp = esp; 609 } 610 611 // debugging support 612 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 613 assert(java_thread == rthread, "unexpected register"); 614 #ifdef ASSERT 615 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 616 // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?"); 617 #endif // ASSERT 618 619 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 620 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 621 622 // push java thread (becomes first argument of C function) 623 624 mov(c_rarg0, java_thread); 625 626 // set last Java frame before call 627 assert(last_java_sp != rfp, "can't use rfp"); 628 629 Label l; 630 set_last_Java_frame(last_java_sp, rfp, l, rscratch1); 631 632 // do the call, remove parameters 633 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l); 634 635 // reset last Java frame 636 // Only interpreter should have to clear fp 637 reset_last_Java_frame(true, true); 
638 639 // C++ interp handles this in the interpreter 640 check_and_handle_popframe(java_thread); 641 check_and_handle_earlyret(java_thread); 642 643 if (check_exceptions) { 644 // check for pending exceptions (java_thread is set upon return) 645 ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset()))); 646 Label ok; 647 cbz(rscratch1, ok); 648 lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry())); 649 br(rscratch1); 650 bind(ok); 651 } 652 653 // get oop result if there is one and reset the value in the thread 654 if (oop_result->is_valid()) { 655 get_vm_result(oop_result, java_thread); 656 } 657 } 658 659 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { 660 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions); 661 } 662 663 // Maybe emit a call via a trampoline. If the code cache is small 664 // trampolines won't be emitted. 665 666 void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) { 667 assert(entry.rspec().type() == relocInfo::runtime_call_type 668 || entry.rspec().type() == relocInfo::opt_virtual_call_type 669 || entry.rspec().type() == relocInfo::static_call_type 670 || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type"); 671 672 unsigned int start_offset = offset(); 673 if (far_branches() && !Compile::current()->in_scratch_emit_size()) { 674 emit_trampoline_stub(offset(), entry.target()); 675 } 676 677 if (cbuf) cbuf->set_insts_mark(); 678 relocate(entry.rspec()); 679 if (Assembler::reachable_from_branch_at(pc(), entry.target())) { 680 bl(entry.target()); 681 } else { 682 bl(pc()); 683 } 684 } 685 686 687 // Emit a trampoline stub for a call to a target which is too far away. 688 // 689 // code sequences: 690 // 691 // call-site: 692 // branch-and-link to <destination> or <trampoline stub> 693 // 694 // Related trampoline stub for this call site in the stub section: 695 // load the call target from the constant pool 696 // branch (LR still points to the call site above) 697 698 void MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset, 699 address dest) { 700 address stub = start_a_stub(Compile::MAX_stubs_size/2); 701 if (stub == NULL) { 702 start_a_stub(Compile::MAX_stubs_size/2); 703 Compile::current()->env()->record_out_of_memory_failure(); 704 return; 705 } 706 707 // Create a trampoline stub relocation which relates this trampoline stub 708 // with the call instruction at insts_call_instruction_offset in the 709 // instructions code-section. 
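  // The stub emitted below is two instructions plus an 8-byte literal:
  //   ldr rscratch1, <literal>   // load the destination address
  //   br  rscratch1
  //   <destination address>
  // so the literal sits NativeCallTrampolineStub::data_offset bytes into the stub.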
710 align(wordSize); 711 relocate(trampoline_stub_Relocation::spec(code()->insts()->start() 712 + insts_call_instruction_offset)); 713 const int stub_start_offset = offset(); 714 715 // Now, create the trampoline stub's code: 716 // - load the call 717 // - call 718 Label target; 719 ldr(rscratch1, target); 720 br(rscratch1); 721 bind(target); 722 assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset, 723 "should be"); 724 emit_int64((int64_t)dest); 725 726 const address stub_start_addr = addr_at(stub_start_offset); 727 728 assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline"); 729 730 end_a_stub(); 731 } 732 733 void MacroAssembler::ic_call(address entry) { 734 RelocationHolder rh = virtual_call_Relocation::spec(pc()); 735 // address const_ptr = long_constant((jlong)Universe::non_oop_word()); 736 // unsigned long offset; 737 // ldr_constant(rscratch2, const_ptr); 738 movptr(rscratch2, (uintptr_t)Universe::non_oop_word()); 739 trampoline_call(Address(entry, rh)); 740 } 741 742 // Implementation of call_VM versions 743 744 void MacroAssembler::call_VM(Register oop_result, 745 address entry_point, 746 bool check_exceptions) { 747 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 748 } 749 750 void MacroAssembler::call_VM(Register oop_result, 751 address entry_point, 752 Register arg_1, 753 bool check_exceptions) { 754 pass_arg1(this, arg_1); 755 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 756 } 757 758 void MacroAssembler::call_VM(Register oop_result, 759 address entry_point, 760 Register arg_1, 761 Register arg_2, 762 bool check_exceptions) { 763 assert(arg_1 != c_rarg2, "smashed arg"); 764 pass_arg2(this, arg_2); 765 pass_arg1(this, arg_1); 766 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 767 } 768 769 void MacroAssembler::call_VM(Register oop_result, 770 address entry_point, 771 Register arg_1, 772 Register arg_2, 773 Register arg_3, 774 bool check_exceptions) { 775 assert(arg_1 != c_rarg3, "smashed arg"); 776 assert(arg_2 != c_rarg3, "smashed arg"); 777 pass_arg3(this, arg_3); 778 779 assert(arg_1 != c_rarg2, "smashed arg"); 780 pass_arg2(this, arg_2); 781 782 pass_arg1(this, arg_1); 783 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 784 } 785 786 void MacroAssembler::call_VM(Register oop_result, 787 Register last_java_sp, 788 address entry_point, 789 int number_of_arguments, 790 bool check_exceptions) { 791 call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 792 } 793 794 void MacroAssembler::call_VM(Register oop_result, 795 Register last_java_sp, 796 address entry_point, 797 Register arg_1, 798 bool check_exceptions) { 799 pass_arg1(this, arg_1); 800 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 801 } 802 803 void MacroAssembler::call_VM(Register oop_result, 804 Register last_java_sp, 805 address entry_point, 806 Register arg_1, 807 Register arg_2, 808 bool check_exceptions) { 809 810 assert(arg_1 != c_rarg2, "smashed arg"); 811 pass_arg2(this, arg_2); 812 pass_arg1(this, arg_1); 813 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 814 } 815 816 void MacroAssembler::call_VM(Register oop_result, 817 Register last_java_sp, 818 address entry_point, 819 Register arg_1, 820 Register arg_2, 821 Register arg_3, 822 bool check_exceptions) { 823 assert(arg_1 != c_rarg3, "smashed arg"); 824 assert(arg_2 != c_rarg3, "smashed arg"); 825 pass_arg3(this, arg_3); 826 assert(arg_1 != c_rarg2, 
"smashed arg"); 827 pass_arg2(this, arg_2); 828 pass_arg1(this, arg_1); 829 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 830 } 831 832 833 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 834 ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 835 str(zr, Address(java_thread, JavaThread::vm_result_offset())); 836 verify_oop(oop_result, "broken oop in call_VM_base"); 837 } 838 839 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 840 ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 841 str(zr, Address(java_thread, JavaThread::vm_result_2_offset())); 842 } 843 844 void MacroAssembler::align(int modulus) { 845 while (offset() % modulus != 0) nop(); 846 } 847 848 // these are no-ops overridden by InterpreterMacroAssembler 849 850 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { } 851 852 void MacroAssembler::check_and_handle_popframe(Register java_thread) { } 853 854 855 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 856 Register tmp, 857 int offset) { 858 intptr_t value = *delayed_value_addr; 859 if (value != 0) 860 return RegisterOrConstant(value + offset); 861 862 // load indirectly to solve generation ordering problem 863 ldr(tmp, ExternalAddress((address) delayed_value_addr)); 864 865 if (offset != 0) 866 add(tmp, tmp, offset); 867 868 return RegisterOrConstant(tmp); 869 } 870 871 872 void MacroAssembler:: notify(int type) { 873 if (type == bytecode_start) { 874 // set_last_Java_frame(esp, rfp, (address)NULL); 875 Assembler:: notify(type); 876 // reset_last_Java_frame(true, false); 877 } 878 else 879 Assembler:: notify(type); 880 } 881 882 // Look up the method for a megamorphic invokeinterface call. 883 // The target method is determined by <intf_klass, itable_index>. 884 // The receiver klass is in recv_klass. 885 // On success, the result will be in method_result, and execution falls through. 886 // On failure, execution transfers to the given label. 887 void MacroAssembler::lookup_interface_method(Register recv_klass, 888 Register intf_klass, 889 RegisterOrConstant itable_index, 890 Register method_result, 891 Register scan_temp, 892 Label& L_no_such_interface) { 893 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 894 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 895 "caller must use same register for non-constant itable index as for method"); 896 897 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 898 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 899 int itentry_off = itableMethodEntry::method_offset_in_bytes(); 900 int scan_step = itableOffsetEntry::size() * wordSize; 901 int vte_size = vtableEntry::size() * wordSize; 902 assert(vte_size == wordSize, "else adjust times_vte_scale"); 903 904 ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize)); 905 906 // %%% Could store the aligned, prescaled offset in the klassoop. 907 // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 908 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3))); 909 add(scan_temp, scan_temp, vtable_base); 910 if (HeapWordsPerLong > 1) { 911 // Round up to align_object_offset boundary 912 // see code for instanceKlass::start_of_itable! 
913 round_to(scan_temp, BytesPerLong); 914 } 915 916 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 917 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 918 // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 919 lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3))); 920 if (itentry_off) 921 add(recv_klass, recv_klass, itentry_off); 922 923 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 924 // if (scan->interface() == intf) { 925 // result = (klass + scan->offset() + itable_index); 926 // } 927 // } 928 Label search, found_method; 929 930 for (int peel = 1; peel >= 0; peel--) { 931 ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); 932 cmp(intf_klass, method_result); 933 934 if (peel) { 935 br(Assembler::EQ, found_method); 936 } else { 937 br(Assembler::NE, search); 938 // (invert the test to fall through to found_method...) 939 } 940 941 if (!peel) break; 942 943 bind(search); 944 945 // Check that the previous entry is non-null. A null entry means that 946 // the receiver class doesn't implement the interface, and wasn't the 947 // same as when the caller was compiled. 948 cbz(method_result, L_no_such_interface); 949 add(scan_temp, scan_temp, scan_step); 950 } 951 952 bind(found_method); 953 954 // Got a hit. 955 ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); 956 ldr(method_result, Address(recv_klass, scan_temp)); 957 } 958 959 // virtual method calling 960 void MacroAssembler::lookup_virtual_method(Register recv_klass, 961 RegisterOrConstant vtable_index, 962 Register method_result) { 963 const int base = InstanceKlass::vtable_start_offset() * wordSize; 964 assert(vtableEntry::size() * wordSize == 8, 965 "adjust the scaling in the code below"); 966 int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes(); 967 968 if (vtable_index.is_register()) { 969 lea(method_result, Address(recv_klass, 970 vtable_index.as_register(), 971 Address::lsl(LogBytesPerWord))); 972 ldr(method_result, Address(method_result, vtable_offset_in_bytes)); 973 } else { 974 vtable_offset_in_bytes += vtable_index.as_constant() * wordSize; 975 ldr(method_result, Address(recv_klass, vtable_offset_in_bytes)); 976 } 977 } 978 979 void MacroAssembler::check_klass_subtype(Register sub_klass, 980 Register super_klass, 981 Register temp_reg, 982 Label& L_success) { 983 Label L_failure; 984 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); 985 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); 986 bind(L_failure); 987 } 988 989 990 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 991 Register super_klass, 992 Register temp_reg, 993 Label* L_success, 994 Label* L_failure, 995 Label* L_slow_path, 996 RegisterOrConstant super_check_offset) { 997 assert_different_registers(sub_klass, super_klass, temp_reg); 998 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 999 if (super_check_offset.is_register()) { 1000 assert_different_registers(sub_klass, super_klass, 1001 super_check_offset.as_register()); 1002 } else if (must_load_sco) { 1003 assert(temp_reg != noreg, "supply either a temp or a register offset"); 1004 } 1005 1006 Label L_fallthrough; 1007 int label_nulls = 0; 1008 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 1009 if (L_failure == 
NULL) { L_failure = &L_fallthrough; label_nulls++; } 1010 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 1011 assert(label_nulls <= 1, "at most one NULL in the batch"); 1012 1013 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1014 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1015 Address super_check_offset_addr(super_klass, sco_offset); 1016 1017 // Hacked jmp, which may only be used just before L_fallthrough. 1018 #define final_jmp(label) \ 1019 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 1020 else b(label) /*omit semi*/ 1021 1022 // If the pointers are equal, we are done (e.g., String[] elements). 1023 // This self-check enables sharing of secondary supertype arrays among 1024 // non-primary types such as array-of-interface. Otherwise, each such 1025 // type would need its own customized SSA. 1026 // We move this check to the front of the fast path because many 1027 // type checks are in fact trivially successful in this manner, 1028 // so we get a nicely predicted branch right at the start of the check. 1029 cmp(sub_klass, super_klass); 1030 br(Assembler::EQ, *L_success); 1031 1032 // Check the supertype display: 1033 if (must_load_sco) { 1034 ldrw(temp_reg, super_check_offset_addr); 1035 super_check_offset = RegisterOrConstant(temp_reg); 1036 } 1037 Address super_check_addr(sub_klass, super_check_offset); 1038 ldr(rscratch1, super_check_addr); 1039 cmp(super_klass, rscratch1); // load displayed supertype 1040 1041 // This check has worked decisively for primary supers. 1042 // Secondary supers are sought in the super_cache ('super_cache_addr'). 1043 // (Secondary supers are interfaces and very deeply nested subtypes.) 1044 // This works in the same check above because of a tricky aliasing 1045 // between the super_cache and the primary super display elements. 1046 // (The 'super_check_addr' can address either, as the case requires.) 1047 // Note that the cache is updated below if it does not help us find 1048 // what we need immediately. 1049 // So if it was a primary super, we can just fail immediately. 1050 // Otherwise, it's the slow path for us (no success at this point). 1051 1052 if (super_check_offset.is_register()) { 1053 br(Assembler::EQ, *L_success); 1054 cmp(super_check_offset.as_register(), sc_offset); 1055 if (L_failure == &L_fallthrough) { 1056 br(Assembler::EQ, *L_slow_path); 1057 } else { 1058 br(Assembler::NE, *L_failure); 1059 final_jmp(*L_slow_path); 1060 } 1061 } else if (super_check_offset.as_constant() == sc_offset) { 1062 // Need a slow path; fast failure is impossible. 1063 if (L_slow_path == &L_fallthrough) { 1064 br(Assembler::EQ, *L_success); 1065 } else { 1066 br(Assembler::NE, *L_slow_path); 1067 final_jmp(*L_success); 1068 } 1069 } else { 1070 // No slow path; it's a fast decision. 
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  // Get super_klass value into r0 (even if it was in r5 or r2).
  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
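  // (An Array<Klass*> is a length word followed by the elements, hence the
  //  separate length load and base_offset_in_bytes() adjustment below.)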
1172 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes())); 1173 // Skip to start of data. 1174 add(r5, r5, Array<Klass*>::base_offset_in_bytes()); 1175 1176 cmp(sp, zr); // Clear Z flag; SP is never zero 1177 // Scan R2 words at [R5] for an occurrence of R0. 1178 // Set NZ/Z based on last compare. 1179 repne_scan(r5, r0, r2, rscratch1); 1180 1181 // Unspill the temp. registers: 1182 pop(pushed_registers, sp); 1183 1184 br(Assembler::NE, *L_failure); 1185 1186 // Success. Cache the super we found and proceed in triumph. 1187 str(super_klass, super_cache_addr); 1188 1189 if (L_success != &L_fallthrough) { 1190 b(*L_success); 1191 } 1192 1193 #undef IS_A_TEMP 1194 1195 bind(L_fallthrough); 1196 } 1197 1198 1199 void MacroAssembler::verify_oop(Register reg, const char* s) { 1200 if (!VerifyOops) return; 1201 1202 // Pass register number to verify_oop_subroutine 1203 const char* b = NULL; 1204 { 1205 ResourceMark rm; 1206 stringStream ss; 1207 ss.print("verify_oop: %s: %s", reg->name(), s); 1208 b = code_string(ss.as_string()); 1209 } 1210 BLOCK_COMMENT("verify_oop {"); 1211 1212 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1213 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1214 1215 mov(r0, reg); 1216 mov(rscratch1, (address)b); 1217 1218 // call indirectly to solve generation ordering problem 1219 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1220 ldr(rscratch2, Address(rscratch2)); 1221 blr(rscratch2); 1222 1223 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1224 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1225 1226 BLOCK_COMMENT("} verify_oop"); 1227 } 1228 1229 void MacroAssembler::verify_oop_addr(Address addr, const char* s) { 1230 if (!VerifyOops) return; 1231 1232 const char* b = NULL; 1233 { 1234 ResourceMark rm; 1235 stringStream ss; 1236 ss.print("verify_oop_addr: %s", s); 1237 b = code_string(ss.as_string()); 1238 } 1239 BLOCK_COMMENT("verify_oop_addr {"); 1240 1241 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 1242 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 1243 1244 // addr may contain sp so we will have to adjust it based on the 1245 // pushes that we just did. 1246 if (addr.uses(sp)) { 1247 lea(r0, addr); 1248 ldr(r0, Address(r0, 4 * wordSize)); 1249 } else { 1250 ldr(r0, addr); 1251 } 1252 mov(rscratch1, (address)b); 1253 1254 // call indirectly to solve generation ordering problem 1255 lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 1256 ldr(rscratch2, Address(rscratch2)); 1257 blr(rscratch2); 1258 1259 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 1260 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 1261 1262 BLOCK_COMMENT("} verify_oop_addr"); 1263 } 1264 1265 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1266 int extra_slot_offset) { 1267 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
1268 int stackElementSize = Interpreter::stackElementSize; 1269 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 1270 #ifdef ASSERT 1271 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 1272 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 1273 #endif 1274 if (arg_slot.is_constant()) { 1275 return Address(esp, arg_slot.as_constant() * stackElementSize 1276 + offset); 1277 } else { 1278 add(rscratch1, esp, arg_slot.as_register(), 1279 ext::uxtx, exact_log2(stackElementSize)); 1280 return Address(rscratch1, offset); 1281 } 1282 } 1283 1284 void MacroAssembler::call_VM_leaf_base(address entry_point, 1285 int number_of_arguments, 1286 Label *retaddr) { 1287 call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr); 1288 } 1289 1290 void MacroAssembler::call_VM_leaf_base1(address entry_point, 1291 int number_of_gp_arguments, 1292 int number_of_fp_arguments, 1293 ret_type type, 1294 Label *retaddr) { 1295 Label E, L; 1296 1297 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 1298 1299 // We add 1 to number_of_arguments because the thread in arg0 is 1300 // not counted 1301 mov(rscratch1, entry_point); 1302 blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type); 1303 if (retaddr) 1304 bind(*retaddr); 1305 1306 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 1307 maybe_isb(); 1308 } 1309 1310 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1311 call_VM_leaf_base(entry_point, number_of_arguments); 1312 } 1313 1314 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1315 pass_arg0(this, arg_0); 1316 call_VM_leaf_base(entry_point, 1); 1317 } 1318 1319 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1320 pass_arg0(this, arg_0); 1321 pass_arg1(this, arg_1); 1322 call_VM_leaf_base(entry_point, 2); 1323 } 1324 1325 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 1326 Register arg_1, Register arg_2) { 1327 pass_arg0(this, arg_0); 1328 pass_arg1(this, arg_1); 1329 pass_arg2(this, arg_2); 1330 call_VM_leaf_base(entry_point, 3); 1331 } 1332 1333 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1334 pass_arg0(this, arg_0); 1335 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1336 } 1337 1338 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1339 1340 assert(arg_0 != c_rarg1, "smashed arg"); 1341 pass_arg1(this, arg_1); 1342 pass_arg0(this, arg_0); 1343 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1344 } 1345 1346 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1347 assert(arg_0 != c_rarg2, "smashed arg"); 1348 assert(arg_1 != c_rarg2, "smashed arg"); 1349 pass_arg2(this, arg_2); 1350 assert(arg_0 != c_rarg1, "smashed arg"); 1351 pass_arg1(this, arg_1); 1352 pass_arg0(this, arg_0); 1353 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1354 } 1355 1356 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1357 assert(arg_0 != c_rarg3, "smashed arg"); 1358 assert(arg_1 != c_rarg3, "smashed arg"); 1359 assert(arg_2 != c_rarg3, "smashed arg"); 1360 pass_arg3(this, arg_3); 1361 assert(arg_0 != c_rarg2, "smashed arg"); 1362 assert(arg_1 != c_rarg2, "smashed arg"); 1363 pass_arg2(this, arg_2); 1364 assert(arg_0 != c_rarg1, "smashed arg"); 1365 pass_arg1(this, 
arg_1); 1366 pass_arg0(this, arg_0); 1367 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1368 } 1369 1370 void MacroAssembler::null_check(Register reg, int offset) { 1371 if (needs_explicit_null_check(offset)) { 1372 // provoke OS NULL exception if reg = NULL by 1373 // accessing M[reg] w/o changing any registers 1374 // NOTE: this is plenty to provoke a segv 1375 ldr(zr, Address(reg)); 1376 } else { 1377 // nothing to do, (later) access of M[reg + offset] 1378 // will provoke OS NULL exception if reg = NULL 1379 } 1380 } 1381 1382 // MacroAssembler protected routines needed to implement 1383 // public methods 1384 1385 void MacroAssembler::mov(Register r, Address dest) { 1386 code_section()->relocate(pc(), dest.rspec()); 1387 u_int64_t imm64 = (u_int64_t)dest.target(); 1388 movptr(r, imm64); 1389 } 1390 1391 // Move a constant pointer into r. In AArch64 mode the virtual 1392 // address space is 48 bits in size, so we only need three 1393 // instructions to create a patchable instruction sequence that can 1394 // reach anywhere. 1395 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 1396 #ifndef PRODUCT 1397 { 1398 char buffer[64]; 1399 snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64); 1400 block_comment(buffer); 1401 } 1402 #endif 1403 assert(imm64 < (1ul << 48), "48-bit overflow in address constant"); 1404 movz(r, imm64 & 0xffff); 1405 imm64 >>= 16; 1406 movk(r, imm64 & 0xffff, 16); 1407 imm64 >>= 16; 1408 movk(r, imm64 & 0xffff, 32); 1409 } 1410 1411 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) { 1412 assert(T != T1D && T != T2D, "invalid arrangement"); 1413 if (T == T8B || T == T16B) { 1414 movi(Vd, T, imm32 & 0xff, 0); 1415 return; 1416 } 1417 u_int32_t nimm32 = ~imm32; 1418 if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; } 1419 u_int32_t x = imm32; 1420 int movi_cnt = 0; 1421 int movn_cnt = 0; 1422 while (x) { if (x & 0xff) movi_cnt++; x >>= 8; } 1423 x = nimm32; 1424 while (x) { if (x & 0xff) movn_cnt++; x >>= 8; } 1425 if (movn_cnt < movi_cnt) imm32 = nimm32; 1426 unsigned lsl = 0; 1427 while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; } 1428 if (movn_cnt < movi_cnt) 1429 mvni(Vd, T, imm32 & 0xff, lsl); 1430 else 1431 movi(Vd, T, imm32 & 0xff, lsl); 1432 imm32 >>= 8; lsl += 8; 1433 while (imm32) { 1434 while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; } 1435 if (movn_cnt < movi_cnt) 1436 bici(Vd, T, imm32 & 0xff, lsl); 1437 else 1438 orri(Vd, T, imm32 & 0xff, lsl); 1439 lsl += 8; imm32 >>= 8; 1440 } 1441 } 1442 1443 void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64) 1444 { 1445 #ifndef PRODUCT 1446 { 1447 char buffer[64]; 1448 snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64); 1449 block_comment(buffer); 1450 } 1451 #endif 1452 if (operand_valid_for_logical_immediate(false, imm64)) { 1453 orr(dst, zr, imm64); 1454 } else { 1455 // we can use a combination of MOVZ or MOVN with 1456 // MOVK to build up the constant 1457 u_int64_t imm_h[4]; 1458 int zero_count = 0; 1459 int neg_count = 0; 1460 int i; 1461 for (i = 0; i < 4; i++) { 1462 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 1463 if (imm_h[i] == 0) { 1464 zero_count++; 1465 } else if (imm_h[i] == 0xffffL) { 1466 neg_count++; 1467 } 1468 } 1469 if (zero_count == 4) { 1470 // one MOVZ will do 1471 movz(dst, 0); 1472 } else if (neg_count == 4) { 1473 // one MOVN will do 1474 movn(dst, 0); 1475 } else if (zero_count == 3) { 1476 for (i = 0; i < 4; i++) { 1477 if (imm_h[i] != 0L) { 1478 movz(dst, (u_int32_t)imm_h[i], (i << 4)); 1479 
break; 1480 } 1481 } 1482 } else if (neg_count == 3) { 1483 // one MOVN will do 1484 for (int i = 0; i < 4; i++) { 1485 if (imm_h[i] != 0xffffL) { 1486 movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1487 break; 1488 } 1489 } 1490 } else if (zero_count == 2) { 1491 // one MOVZ and one MOVK will do 1492 for (i = 0; i < 3; i++) { 1493 if (imm_h[i] != 0L) { 1494 movz(dst, (u_int32_t)imm_h[i], (i << 4)); 1495 i++; 1496 break; 1497 } 1498 } 1499 for (;i < 4; i++) { 1500 if (imm_h[i] != 0L) { 1501 movk(dst, (u_int32_t)imm_h[i], (i << 4)); 1502 } 1503 } 1504 } else if (neg_count == 2) { 1505 // one MOVN and one MOVK will do 1506 for (i = 0; i < 4; i++) { 1507 if (imm_h[i] != 0xffffL) { 1508 movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1509 i++; 1510 break; 1511 } 1512 } 1513 for (;i < 4; i++) { 1514 if (imm_h[i] != 0xffffL) { 1515 movk(dst, (u_int32_t)imm_h[i], (i << 4)); 1516 } 1517 } 1518 } else if (zero_count == 1) { 1519 // one MOVZ and two MOVKs will do 1520 for (i = 0; i < 4; i++) { 1521 if (imm_h[i] != 0L) { 1522 movz(dst, (u_int32_t)imm_h[i], (i << 4)); 1523 i++; 1524 break; 1525 } 1526 } 1527 for (;i < 4; i++) { 1528 if (imm_h[i] != 0x0L) { 1529 movk(dst, (u_int32_t)imm_h[i], (i << 4)); 1530 } 1531 } 1532 } else if (neg_count == 1) { 1533 // one MOVN and two MOVKs will do 1534 for (i = 0; i < 4; i++) { 1535 if (imm_h[i] != 0xffffL) { 1536 movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4)); 1537 i++; 1538 break; 1539 } 1540 } 1541 for (;i < 4; i++) { 1542 if (imm_h[i] != 0xffffL) { 1543 movk(dst, (u_int32_t)imm_h[i], (i << 4)); 1544 } 1545 } 1546 } else { 1547 // use a MOVZ and 3 MOVKs (makes it easier to debug) 1548 movz(dst, (u_int32_t)imm_h[0], 0); 1549 for (i = 1; i < 4; i++) { 1550 movk(dst, (u_int32_t)imm_h[i], (i << 4)); 1551 } 1552 } 1553 } 1554 } 1555 1556 void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32) 1557 { 1558 #ifndef PRODUCT 1559 { 1560 char buffer[64]; 1561 snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32); 1562 block_comment(buffer); 1563 } 1564 #endif 1565 if (operand_valid_for_logical_immediate(true, imm32)) { 1566 orrw(dst, zr, imm32); 1567 } else { 1568 // we can use MOVZ, MOVN or two calls to MOVK to build up the 1569 // constant 1570 u_int32_t imm_h[2]; 1571 imm_h[0] = imm32 & 0xffff; 1572 imm_h[1] = ((imm32 >> 16) & 0xffff); 1573 if (imm_h[0] == 0) { 1574 movzw(dst, imm_h[1], 16); 1575 } else if (imm_h[0] == 0xffff) { 1576 movnw(dst, imm_h[1] ^ 0xffff, 16); 1577 } else if (imm_h[1] == 0) { 1578 movzw(dst, imm_h[0], 0); 1579 } else if (imm_h[1] == 0xffff) { 1580 movnw(dst, imm_h[0] ^ 0xffff, 0); 1581 } else { 1582 // use a MOVZ and MOVK (makes it easier to debug) 1583 movzw(dst, imm_h[0], 0); 1584 movkw(dst, imm_h[1], 16); 1585 } 1586 } 1587 } 1588 1589 // Form an address from base + offset in Rd. Rd may or may 1590 // not actually be used: you must use the Address that is returned. 1591 // It is up to you to ensure that the shift provided matches the size 1592 // of your data. 
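// Three strategies are tried, in order: (1) the offset already fits the
// scaled unsigned-immediate form, so it is simply returned in the Address;
// (2) a large, aligned offset is split into an add of its high bits plus a
// small immediate remainder; (3) otherwise the whole offset is materialized
// into Rd and added to the base.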
1593 Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) { 1594 if (Address::offset_ok_for_immed(byte_offset, shift)) 1595 // It fits; no need for any heroics 1596 return Address(base, byte_offset); 1597 1598 // Don't do anything clever with negative or misaligned offsets 1599 unsigned mask = (1 << shift) - 1; 1600 if (byte_offset < 0 || byte_offset & mask) { 1601 mov(Rd, byte_offset); 1602 add(Rd, base, Rd); 1603 return Address(Rd); 1604 } 1605 1606 // See if we can do this with two 12-bit offsets 1607 { 1608 unsigned long word_offset = byte_offset >> shift; 1609 unsigned long masked_offset = word_offset & 0xfff000; 1610 if (Address::offset_ok_for_immed(word_offset - masked_offset) 1611 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 1612 add(Rd, base, masked_offset << shift); 1613 word_offset -= masked_offset; 1614 return Address(Rd, word_offset << shift); 1615 } 1616 } 1617 1618 // Do it the hard way 1619 mov(Rd, byte_offset); 1620 add(Rd, base, Rd); 1621 return Address(Rd); 1622 } 1623 1624 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) { 1625 Label retry_load; 1626 bind(retry_load); 1627 // flush and load exclusive from the memory location 1628 ldxrw(tmp, counter_addr); 1629 addw(tmp, tmp, 1); 1630 // if we store+flush with no intervening write tmp wil be zero 1631 stxrw(tmp, tmp, counter_addr); 1632 cbnzw(tmp, retry_load); 1633 } 1634 1635 1636 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 1637 bool want_remainder, Register scratch) 1638 { 1639 // Full implementation of Java idiv and irem. The function 1640 // returns the (pc) offset of the div instruction - may be needed 1641 // for implicit exceptions. 1642 // 1643 // constraint : ra/rb =/= scratch 1644 // normal case 1645 // 1646 // input : ra: dividend 1647 // rb: divisor 1648 // 1649 // result: either 1650 // quotient (= ra idiv rb) 1651 // remainder (= ra irem rb) 1652 1653 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1654 1655 int idivl_offset = offset(); 1656 if (! want_remainder) { 1657 sdivw(result, ra, rb); 1658 } else { 1659 sdivw(scratch, ra, rb); 1660 Assembler::msubw(result, scratch, rb, ra); 1661 } 1662 1663 return idivl_offset; 1664 } 1665 1666 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 1667 bool want_remainder, Register scratch) 1668 { 1669 // Full implementation of Java ldiv and lrem. The function 1670 // returns the (pc) offset of the div instruction - may be needed 1671 // for implicit exceptions. 1672 // 1673 // constraint : ra/rb =/= scratch 1674 // normal case 1675 // 1676 // input : ra: dividend 1677 // rb: divisor 1678 // 1679 // result: either 1680 // quotient (= ra idiv rb) 1681 // remainder (= ra irem rb) 1682 1683 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1684 1685 int idivq_offset = offset(); 1686 if (! want_remainder) { 1687 sdiv(result, ra, rb); 1688 } else { 1689 sdiv(scratch, ra, rb); 1690 Assembler::msub(result, scratch, rb, ra); 1691 } 1692 1693 return idivq_offset; 1694 } 1695 1696 // MacroAssembler routines found actually to be needed 1697 1698 void MacroAssembler::push(Register src) 1699 { 1700 str(src, Address(pre(esp, -1 * wordSize))); 1701 } 1702 1703 void MacroAssembler::pop(Register dst) 1704 { 1705 ldr(dst, Address(post(esp, 1 * wordSize))); 1706 } 1707 1708 // Note: load_unsigned_short used to be called load_unsigned_word. 
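//
// The sub-word loads that follow differ only in how they extend the loaded
// value into the destination register. A minimal C model (illustration only;
// the helper names and <stdint.h> types are ours):
#if 0
#include <stdint.h>

uint64_t load_unsigned_short_model(const uint16_t *p) {   // ldrh: zero-extend
  return (uint64_t)*p;                                     // 0xFFEE -> 0x000000000000FFEE
}

int64_t load_signed_short_model(const int16_t *p) {       // ldrsh: sign-extend to 64 bits
  return (int64_t)*p;                                      // 0xFFEE -> 0xFFFFFFFFFFFFFFEE
}

int32_t load_signed_short32_model(const int16_t *p) {     // ldrshw: sign-extend to 32 bits
  return (int32_t)*p;
}
#endif
// The byte variants (ldrb, ldrsb, ldrsbw) behave analogously for 8-bit data.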
1709 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1710 int off = offset(); 1711 ldrh(dst, src); 1712 return off; 1713 } 1714 1715 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1716 int off = offset(); 1717 ldrb(dst, src); 1718 return off; 1719 } 1720 1721 int MacroAssembler::load_signed_short(Register dst, Address src) { 1722 int off = offset(); 1723 ldrsh(dst, src); 1724 return off; 1725 } 1726 1727 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1728 int off = offset(); 1729 ldrsb(dst, src); 1730 return off; 1731 } 1732 1733 int MacroAssembler::load_signed_short32(Register dst, Address src) { 1734 int off = offset(); 1735 ldrshw(dst, src); 1736 return off; 1737 } 1738 1739 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 1740 int off = offset(); 1741 ldrsbw(dst, src); 1742 return off; 1743 } 1744 1745 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1746 switch (size_in_bytes) { 1747 case 8: ldr(dst, src); break; 1748 case 4: ldrw(dst, src); break; 1749 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1750 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1751 default: ShouldNotReachHere(); 1752 } 1753 } 1754 1755 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1756 switch (size_in_bytes) { 1757 case 8: str(src, dst); break; 1758 case 4: strw(src, dst); break; 1759 case 2: strh(src, dst); break; 1760 case 1: strb(src, dst); break; 1761 default: ShouldNotReachHere(); 1762 } 1763 } 1764 1765 void MacroAssembler::decrementw(Register reg, int value) 1766 { 1767 if (value < 0) { incrementw(reg, -value); return; } 1768 if (value == 0) { return; } 1769 if (value < (1 << 12)) { subw(reg, reg, value); return; } 1770 /* else */ { 1771 guarantee(reg != rscratch2, "invalid dst for register decrement"); 1772 movw(rscratch2, (unsigned)value); 1773 subw(reg, reg, rscratch2); 1774 } 1775 } 1776 1777 void MacroAssembler::decrement(Register reg, int value) 1778 { 1779 if (value < 0) { increment(reg, -value); return; } 1780 if (value == 0) { return; } 1781 if (value < (1 << 12)) { sub(reg, reg, value); return; } 1782 /* else */ { 1783 assert(reg != rscratch2, "invalid dst for register decrement"); 1784 mov(rscratch2, (unsigned long)value); 1785 sub(reg, reg, rscratch2); 1786 } 1787 } 1788 1789 void MacroAssembler::decrementw(Address dst, int value) 1790 { 1791 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 1792 ldrw(rscratch1, dst); 1793 decrementw(rscratch1, value); 1794 strw(rscratch1, dst); 1795 } 1796 1797 void MacroAssembler::decrement(Address dst, int value) 1798 { 1799 assert(!dst.uses(rscratch1), "invalid address for decrement"); 1800 ldr(rscratch1, dst); 1801 decrement(rscratch1, value); 1802 str(rscratch1, dst); 1803 } 1804 1805 void MacroAssembler::incrementw(Register reg, int value) 1806 { 1807 if (value < 0) { decrementw(reg, -value); return; } 1808 if (value == 0) { return; } 1809 if (value < (1 << 12)) { addw(reg, reg, value); return; } 1810 /* else */ { 1811 assert(reg != rscratch2, "invalid dst for register increment"); 1812 movw(rscratch2, (unsigned)value); 1813 addw(reg, reg, rscratch2); 1814 } 1815 } 1816 1817 void MacroAssembler::increment(Register reg, int value) 1818 { 1819 if (value < 0) { decrement(reg, -value); return; } 1820 if (value == 0) { return; } 1821 if (value < (1 << 
12)) { add(reg, reg, value); return; } 1822 /* else */ { 1823 assert(reg != rscratch2, "invalid dst for register increment"); 1824 movw(rscratch2, (unsigned)value); 1825 add(reg, reg, rscratch2); 1826 } 1827 } 1828 1829 void MacroAssembler::incrementw(Address dst, int value) 1830 { 1831 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1832 ldrw(rscratch1, dst); 1833 incrementw(rscratch1, value); 1834 strw(rscratch1, dst); 1835 } 1836 1837 void MacroAssembler::increment(Address dst, int value) 1838 { 1839 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1840 ldr(rscratch1, dst); 1841 increment(rscratch1, value); 1842 str(rscratch1, dst); 1843 } 1844 1845 1846 void MacroAssembler::pusha() { 1847 push(0x7fffffff, sp); 1848 } 1849 1850 void MacroAssembler::popa() { 1851 pop(0x7fffffff, sp); 1852 } 1853 1854 // Push lots of registers in the bit set supplied. Don't push sp. 1855 // Return the number of words pushed 1856 int MacroAssembler::push(unsigned int bitset, Register stack) { 1857 int words_pushed = 0; 1858 1859 // Scan bitset to accumulate register pairs 1860 unsigned char regs[32]; 1861 int count = 0; 1862 for (int reg = 0; reg <= 30; reg++) { 1863 if (1 & bitset) 1864 regs[count++] = reg; 1865 bitset >>= 1; 1866 } 1867 regs[count++] = zr->encoding_nocheck(); 1868 count &= ~1; // Only push an even nuber of regs 1869 1870 if (count) { 1871 stp(as_Register(regs[0]), as_Register(regs[1]), 1872 Address(pre(stack, -count * wordSize))); 1873 words_pushed += 2; 1874 } 1875 for (int i = 2; i < count; i += 2) { 1876 stp(as_Register(regs[i]), as_Register(regs[i+1]), 1877 Address(stack, i * wordSize)); 1878 words_pushed += 2; 1879 } 1880 1881 assert(words_pushed == count, "oops, pushed != count"); 1882 1883 return count; 1884 } 1885 1886 int MacroAssembler::pop(unsigned int bitset, Register stack) { 1887 int words_pushed = 0; 1888 1889 // Scan bitset to accumulate register pairs 1890 unsigned char regs[32]; 1891 int count = 0; 1892 for (int reg = 0; reg <= 30; reg++) { 1893 if (1 & bitset) 1894 regs[count++] = reg; 1895 bitset >>= 1; 1896 } 1897 regs[count++] = zr->encoding_nocheck(); 1898 count &= ~1; 1899 1900 for (int i = 2; i < count; i += 2) { 1901 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 1902 Address(stack, i * wordSize)); 1903 words_pushed += 2; 1904 } 1905 if (count) { 1906 ldp(as_Register(regs[0]), as_Register(regs[1]), 1907 Address(post(stack, count * wordSize))); 1908 words_pushed += 2; 1909 } 1910 1911 assert(words_pushed == count, "oops, pushed != count"); 1912 1913 return count; 1914 } 1915 #ifdef ASSERT 1916 void MacroAssembler::verify_heapbase(const char* msg) { 1917 #if 0 1918 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 1919 assert (Universe::heap() != NULL, "java heap should be initialized"); 1920 if (CheckCompressedOops) { 1921 Label ok; 1922 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 1923 cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 1924 br(Assembler::EQ, ok); 1925 stop(msg); 1926 bind(ok); 1927 pop(1 << rscratch1->encoding(), sp); 1928 } 1929 #endif 1930 } 1931 #endif 1932 1933 void MacroAssembler::stop(const char* msg) { 1934 address ip = pc(); 1935 pusha(); 1936 mov(c_rarg0, (address)msg); 1937 mov(c_rarg1, (address)ip); 1938 mov(c_rarg2, sp); 1939 mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64)); 1940 // call(c_rarg3); 1941 blrt(c_rarg3, 3, 0, 1); 1942 hlt(0); 1943 } 1944 1945 // If a constant does not fit in an 
immediate field, generate some 1946 // number of MOV instructions and then perform the operation. 1947 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm, 1948 add_sub_imm_insn insn1, 1949 add_sub_reg_insn insn2) { 1950 assert(Rd != zr, "Rd = zr and not setting flags?"); 1951 if (operand_valid_for_add_sub_immediate((int)imm)) { 1952 (this->*insn1)(Rd, Rn, imm); 1953 } else { 1954 if (uabs(imm) < (1 << 24)) { 1955 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 1956 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 1957 } else { 1958 assert_different_registers(Rd, Rn); 1959 mov(Rd, (uint64_t)imm); 1960 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 1961 } 1962 } 1963 } 1964 1965 // Seperate vsn which sets the flags. Optimisations are more restricted 1966 // because we must set the flags correctly. 1967 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm, 1968 add_sub_imm_insn insn1, 1969 add_sub_reg_insn insn2) { 1970 if (operand_valid_for_add_sub_immediate((int)imm)) { 1971 (this->*insn1)(Rd, Rn, imm); 1972 } else { 1973 assert_different_registers(Rd, Rn); 1974 assert(Rd != zr, "overflow in immediate operand"); 1975 mov(Rd, (uint64_t)imm); 1976 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 1977 } 1978 } 1979 1980 1981 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 1982 if (increment.is_register()) { 1983 add(Rd, Rn, increment.as_register()); 1984 } else { 1985 add(Rd, Rn, increment.as_constant()); 1986 } 1987 } 1988 1989 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 1990 if (increment.is_register()) { 1991 addw(Rd, Rn, increment.as_register()); 1992 } else { 1993 addw(Rd, Rn, increment.as_constant()); 1994 } 1995 } 1996 1997 void MacroAssembler::reinit_heapbase() 1998 { 1999 if (UseCompressedOops) { 2000 if (Universe::is_fully_initialized()) { 2001 mov(rheapbase, Universe::narrow_ptrs_base()); 2002 } else { 2003 lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 2004 ldr(rheapbase, Address(rheapbase)); 2005 } 2006 } 2007 } 2008 2009 // this simulates the behaviour of the x86 cmpxchg instruction using a 2010 // load linked/store conditional pair. we use the acquire/release 2011 // versions of these instructions so that we flush pending writes as 2012 // per Java semantics. 2013 2014 // n.b the x86 version assumes the old value to be compared against is 2015 // in rax and updates rax with the value located in memory if the 2016 // cmpxchg fails. we supply a register for the old value explicitly 2017 2018 // the aarch64 load linked/store conditional instructions do not 2019 // accept an offset. so, unlike x86, we must provide a plain register 2020 // to identify the memory word to be compared/exchanged rather than a 2021 // register+offset Address. 
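//
// What the cmpxchgptr/cmpxchgw sequences below compute, as a C sketch
// (illustration only; the function name is ours). The generated code retries
// the whole sequence whenever the exclusive store fails, so a spurious stlxr
// failure never causes a stale value to be reported.
#if 0
#include <stdint.h>
#include <stdbool.h>

bool cmpxchg_model(uint64_t *addr, uint64_t *oldv, uint64_t newv) {
  uint64_t observed = *addr;        // ldaxr
  if (observed == *oldv) {
    *addr = newv;                   // stlxr (retried on spurious failure)
    return true;                    // branch to 'succeed'
  }
  *oldv = observed;                 // failure: return the memory word in oldv
  return false;                     // branch to 'fail' (if supplied)
}
#endif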
2022 2023 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 2024 Label &succeed, Label *fail) { 2025 // oldv holds comparison value 2026 // newv holds value to write in exchange 2027 // addr identifies memory word to compare against/update 2028 // tmp returns 0/1 for success/failure 2029 Label retry_load, nope; 2030 2031 bind(retry_load); 2032 // flush and load exclusive from the memory location 2033 // and fail if it is not what we expect 2034 ldaxr(tmp, addr); 2035 cmp(tmp, oldv); 2036 br(Assembler::NE, nope); 2037 // if we store+flush with no intervening write tmp wil be zero 2038 stlxr(tmp, newv, addr); 2039 cbzw(tmp, succeed); 2040 // retry so we only ever return after a load fails to compare 2041 // ensures we don't return a stale value after a failed write. 2042 b(retry_load); 2043 // if the memory word differs we return it in oldv and signal a fail 2044 bind(nope); 2045 membar(AnyAny); 2046 mov(oldv, tmp); 2047 if (fail) 2048 b(*fail); 2049 } 2050 2051 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 2052 Label &succeed, Label *fail) { 2053 // oldv holds comparison value 2054 // newv holds value to write in exchange 2055 // addr identifies memory word to compare against/update 2056 // tmp returns 0/1 for success/failure 2057 Label retry_load, nope; 2058 2059 bind(retry_load); 2060 // flush and load exclusive from the memory location 2061 // and fail if it is not what we expect 2062 ldaxrw(tmp, addr); 2063 cmp(tmp, oldv); 2064 br(Assembler::NE, nope); 2065 // if we store+flush with no intervening write tmp wil be zero 2066 stlxrw(tmp, newv, addr); 2067 cbzw(tmp, succeed); 2068 // retry so we only ever return after a load fails to compare 2069 // ensures we don't return a stale value after a failed write. 2070 b(retry_load); 2071 // if the memory word differs we return it in oldv and signal a fail 2072 bind(nope); 2073 membar(AnyAny); 2074 mov(oldv, tmp); 2075 if (fail) 2076 b(*fail); 2077 } 2078 2079 static bool different(Register a, RegisterOrConstant b, Register c) { 2080 if (b.is_constant()) 2081 return a != c; 2082 else 2083 return a != b.as_register() && a != c && b.as_register() != c; 2084 } 2085 2086 #define ATOMIC_OP(LDXR, OP, STXR) \ 2087 void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \ 2088 Register result = rscratch2; \ 2089 if (prev->is_valid()) \ 2090 result = different(prev, incr, addr) ? prev : rscratch2; \ 2091 \ 2092 Label retry_load; \ 2093 bind(retry_load); \ 2094 LDXR(result, addr); \ 2095 OP(rscratch1, result, incr); \ 2096 STXR(rscratch1, rscratch1, addr); \ 2097 cbnzw(rscratch1, retry_load); \ 2098 if (prev->is_valid() && prev != result) \ 2099 mov(prev, result); \ 2100 } 2101 2102 ATOMIC_OP(ldxr, add, stxr) 2103 ATOMIC_OP(ldxrw, addw, stxrw) 2104 2105 #undef ATOMIC_OP 2106 2107 #define ATOMIC_XCHG(OP, LDXR, STXR) \ 2108 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 2109 Register result = rscratch2; \ 2110 if (prev->is_valid()) \ 2111 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 2112 \ 2113 Label retry_load; \ 2114 bind(retry_load); \ 2115 LDXR(result, addr); \ 2116 STXR(rscratch1, newv, addr); \ 2117 cbnzw(rscratch1, retry_load); \ 2118 if (prev->is_valid() && prev != result) \ 2119 mov(prev, result); \ 2120 } 2121 2122 ATOMIC_XCHG(xchg, ldxr, stxr) 2123 ATOMIC_XCHG(xchgw, ldxrw, stxrw) 2124 2125 #undef ATOMIC_XCHG 2126 2127 void MacroAssembler::incr_allocated_bytes(Register thread, 2128 Register var_size_in_bytes, 2129 int con_size_in_bytes, 2130 Register t1) { 2131 if (!thread->is_valid()) { 2132 thread = rthread; 2133 } 2134 assert(t1->is_valid(), "need temp reg"); 2135 2136 ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset()))); 2137 if (var_size_in_bytes->is_valid()) { 2138 add(t1, t1, var_size_in_bytes); 2139 } else { 2140 add(t1, t1, con_size_in_bytes); 2141 } 2142 str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset()))); 2143 } 2144 2145 #ifndef PRODUCT 2146 extern "C" void findpc(intptr_t x); 2147 #endif 2148 2149 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 2150 { 2151 // In order to get locks to work, we need to fake a in_VM state 2152 if (ShowMessageBoxOnError ) { 2153 JavaThread* thread = JavaThread::current(); 2154 JavaThreadState saved_state = thread->thread_state(); 2155 thread->set_thread_state(_thread_in_vm); 2156 #ifndef PRODUCT 2157 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 2158 ttyLocker ttyl; 2159 BytecodeCounter::print(); 2160 } 2161 #endif 2162 if (os::message_box(msg, "Execution stopped, print registers?")) { 2163 ttyLocker ttyl; 2164 tty->print_cr(" pc = 0x%016lx", pc); 2165 #ifndef PRODUCT 2166 tty->cr(); 2167 findpc(pc); 2168 tty->cr(); 2169 #endif 2170 tty->print_cr(" r0 = 0x%016lx", regs[0]); 2171 tty->print_cr(" r1 = 0x%016lx", regs[1]); 2172 tty->print_cr(" r2 = 0x%016lx", regs[2]); 2173 tty->print_cr(" r3 = 0x%016lx", regs[3]); 2174 tty->print_cr(" r4 = 0x%016lx", regs[4]); 2175 tty->print_cr(" r5 = 0x%016lx", regs[5]); 2176 tty->print_cr(" r6 = 0x%016lx", regs[6]); 2177 tty->print_cr(" r7 = 0x%016lx", regs[7]); 2178 tty->print_cr(" r8 = 0x%016lx", regs[8]); 2179 tty->print_cr(" r9 = 0x%016lx", regs[9]); 2180 tty->print_cr("r10 = 0x%016lx", regs[10]); 2181 tty->print_cr("r11 = 0x%016lx", regs[11]); 2182 tty->print_cr("r12 = 0x%016lx", regs[12]); 2183 tty->print_cr("r13 = 0x%016lx", regs[13]); 2184 tty->print_cr("r14 = 0x%016lx", regs[14]); 2185 tty->print_cr("r15 = 0x%016lx", regs[15]); 2186 tty->print_cr("r16 = 0x%016lx", regs[16]); 2187 tty->print_cr("r17 = 0x%016lx", regs[17]); 2188 tty->print_cr("r18 = 0x%016lx", regs[18]); 2189 tty->print_cr("r19 = 0x%016lx", regs[19]); 2190 tty->print_cr("r20 = 0x%016lx", regs[20]); 2191 tty->print_cr("r21 = 0x%016lx", regs[21]); 2192 tty->print_cr("r22 = 0x%016lx", regs[22]); 2193 tty->print_cr("r23 = 0x%016lx", regs[23]); 2194 tty->print_cr("r24 = 0x%016lx", regs[24]); 2195 tty->print_cr("r25 = 0x%016lx", regs[25]); 2196 tty->print_cr("r26 = 0x%016lx", regs[26]); 2197 tty->print_cr("r27 = 0x%016lx", regs[27]); 2198 tty->print_cr("r28 = 0x%016lx", regs[28]); 2199 tty->print_cr("r30 = 0x%016lx", regs[30]); 2200 tty->print_cr("r31 = 0x%016lx", regs[31]); 2201 BREAKPOINT; 2202 } 2203 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); 2204 } else { 2205 ttyLocker ttyl; 2206 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", 2207 msg); 2208 assert(false, err_msg("DEBUG MESSAGE: %s", msg)); 2209 } 2210 } 2211 2212 #ifdef BUILTIN_SIM 2213 // routine to generate 
an x86 prolog for a stub function which 2214 // bootstraps into the generated ARM code which directly follows the 2215 // stub 2216 // 2217 // the argument encodes the number of general and fp registers 2218 // passed by the caller and the callng convention (currently just 2219 // the number of general registers and assumes C argument passing) 2220 2221 extern "C" { 2222 int aarch64_stub_prolog_size(); 2223 void aarch64_stub_prolog(); 2224 void aarch64_prolog(); 2225 } 2226 2227 void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, 2228 address *prolog_ptr) 2229 { 2230 int calltype = (((ret_type & 0x3) << 8) | 2231 ((fp_arg_count & 0xf) << 4) | 2232 (gp_arg_count & 0xf)); 2233 2234 // the addresses for the x86 to ARM entry code we need to use 2235 address start = pc(); 2236 // printf("start = %lx\n", start); 2237 int byteCount = aarch64_stub_prolog_size(); 2238 // printf("byteCount = %x\n", byteCount); 2239 int instructionCount = (byteCount + 3)/ 4; 2240 // printf("instructionCount = %x\n", instructionCount); 2241 for (int i = 0; i < instructionCount; i++) { 2242 nop(); 2243 } 2244 2245 memcpy(start, (void*)aarch64_stub_prolog, byteCount); 2246 2247 // write the address of the setup routine and the call format at the 2248 // end of into the copied code 2249 u_int64_t *patch_end = (u_int64_t *)(start + byteCount); 2250 if (prolog_ptr) 2251 patch_end[-2] = (u_int64_t)prolog_ptr; 2252 patch_end[-1] = calltype; 2253 } 2254 #endif 2255 2256 void MacroAssembler::push_CPU_state() { 2257 push(0x3fffffff, sp); // integer registers except lr & sp 2258 2259 for (int i = 30; i >= 0; i -= 2) 2260 stpd(as_FloatRegister(i), as_FloatRegister(i+1), 2261 Address(pre(sp, -2 * wordSize))); 2262 } 2263 2264 void MacroAssembler::pop_CPU_state() { 2265 for (int i = 0; i < 32; i += 2) 2266 ldpd(as_FloatRegister(i), as_FloatRegister(i+1), 2267 Address(post(sp, 2 * wordSize))); 2268 2269 pop(0x3fffffff, sp); // integer registers except lr & sp 2270 } 2271 2272 /** 2273 * Helpers for multiply_to_len(). 2274 */ 2275 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, 2276 Register src1, Register src2) { 2277 adds(dest_lo, dest_lo, src1); 2278 adc(dest_hi, dest_hi, zr); 2279 adds(dest_lo, dest_lo, src2); 2280 adc(final_dest_hi, dest_hi, zr); 2281 } 2282 2283 // Generate an address from (r + r1 extend offset). "size" is the 2284 // size of the operand. The result may be in rscratch2. 2285 Address MacroAssembler::offsetted_address(Register r, Register r1, 2286 Address::extend ext, int offset, int size) { 2287 if (offset || (ext.shift() % size != 0)) { 2288 lea(rscratch2, Address(r, r1, ext)); 2289 return Address(rscratch2, offset); 2290 } else { 2291 return Address(r, r1, ext); 2292 } 2293 } 2294 2295 /** 2296 * Multiply 64 bit by 64 bit first loop. 
2297 */ 2298 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 2299 Register y, Register y_idx, Register z, 2300 Register carry, Register product, 2301 Register idx, Register kdx) { 2302 // 2303 // jlong carry, x[], y[], z[]; 2304 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 2305 // huge_128 product = y[idx] * x[xstart] + carry; 2306 // z[kdx] = (jlong)product; 2307 // carry = (jlong)(product >>> 64); 2308 // } 2309 // z[xstart] = carry; 2310 // 2311 2312 Label L_first_loop, L_first_loop_exit; 2313 Label L_one_x, L_one_y, L_multiply; 2314 2315 subsw(xstart, xstart, 1); 2316 br(Assembler::MI, L_one_x); 2317 2318 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt))); 2319 ldr(x_xstart, Address(rscratch1)); 2320 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian 2321 2322 bind(L_first_loop); 2323 subsw(idx, idx, 1); 2324 br(Assembler::MI, L_first_loop_exit); 2325 subsw(idx, idx, 1); 2326 br(Assembler::MI, L_one_y); 2327 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2328 ldr(y_idx, Address(rscratch1)); 2329 ror(y_idx, y_idx, 32); // convert big-endian to little-endian 2330 bind(L_multiply); 2331 2332 // AArch64 has a multiply-accumulate instruction that we can't use 2333 // here because it has no way to process carries, so we have to use 2334 // separate add and adc instructions. Bah. 2335 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product 2336 mul(product, x_xstart, y_idx); 2337 adds(product, product, carry); 2338 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product 2339 2340 subw(kdx, kdx, 2); 2341 ror(product, product, 32); // back to big-endian 2342 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong)); 2343 2344 b(L_first_loop); 2345 2346 bind(L_one_y); 2347 ldrw(y_idx, Address(y, 0)); 2348 b(L_multiply); 2349 2350 bind(L_one_x); 2351 ldrw(x_xstart, Address(x, 0)); 2352 b(L_first_loop); 2353 2354 bind(L_first_loop_exit); 2355 } 2356 2357 /** 2358 * Multiply 128 bit by 128. Unrolled inner loop. 
2359 * 2360 */ 2361 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 2362 Register carry, Register carry2, 2363 Register idx, Register jdx, 2364 Register yz_idx1, Register yz_idx2, 2365 Register tmp, Register tmp3, Register tmp4, 2366 Register tmp6, Register product_hi) { 2367 2368 // jlong carry, x[], y[], z[]; 2369 // int kdx = ystart+1; 2370 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 2371 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 2372 // jlong carry2 = (jlong)(tmp3 >>> 64); 2373 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 2374 // carry = (jlong)(tmp4 >>> 64); 2375 // z[kdx+idx+1] = (jlong)tmp3; 2376 // z[kdx+idx] = (jlong)tmp4; 2377 // } 2378 // idx += 2; 2379 // if (idx > 0) { 2380 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 2381 // z[kdx+idx] = (jlong)yz_idx1; 2382 // carry = (jlong)(yz_idx1 >>> 64); 2383 // } 2384 // 2385 2386 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 2387 2388 lsrw(jdx, idx, 2); 2389 2390 bind(L_third_loop); 2391 2392 subsw(jdx, jdx, 1); 2393 br(Assembler::MI, L_third_loop_exit); 2394 subw(idx, idx, 4); 2395 2396 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2397 2398 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 2399 2400 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2401 2402 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 2403 ror(yz_idx2, yz_idx2, 32); 2404 2405 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 2406 2407 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2408 umulh(tmp4, product_hi, yz_idx1); 2409 2410 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 2411 ror(rscratch2, rscratch2, 32); 2412 2413 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 2414 umulh(carry2, product_hi, yz_idx2); 2415 2416 // propagate sum of both multiplications into carry:tmp4:tmp3 2417 adds(tmp3, tmp3, carry); 2418 adc(tmp4, tmp4, zr); 2419 adds(tmp3, tmp3, rscratch1); 2420 adcs(tmp4, tmp4, tmp); 2421 adc(carry, carry2, zr); 2422 adds(tmp4, tmp4, rscratch2); 2423 adc(carry, carry, zr); 2424 2425 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 2426 ror(tmp4, tmp4, 32); 2427 stp(tmp4, tmp3, Address(tmp6, 0)); 2428 2429 b(L_third_loop); 2430 bind (L_third_loop_exit); 2431 2432 andw (idx, idx, 0x3); 2433 cbz(idx, L_post_third_loop_done); 2434 2435 Label L_check_1; 2436 subsw(idx, idx, 2); 2437 br(Assembler::MI, L_check_1); 2438 2439 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2440 ldr(yz_idx1, Address(rscratch1, 0)); 2441 ror(yz_idx1, yz_idx1, 32); 2442 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2443 umulh(tmp4, product_hi, yz_idx1); 2444 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2445 ldr(yz_idx2, Address(rscratch1, 0)); 2446 ror(yz_idx2, yz_idx2, 32); 2447 2448 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 2449 2450 ror(tmp3, tmp3, 32); 2451 str(tmp3, Address(rscratch1, 0)); 2452 2453 bind (L_check_1); 2454 2455 andw (idx, idx, 0x1); 2456 subsw(idx, idx, 1); 2457 br(Assembler::MI, L_post_third_loop_done); 2458 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2459 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 2460 umulh(carry2, tmp4, product_hi); 2461 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2462 2463 add2_with_carry(carry2, tmp3, tmp4, carry); 2464 2465 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2466 
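  // The extr() that follows takes bits 95..32 of the 128-bit value
  // carry2:tmp3 as the carry into the next 32-bit limb. As a C sketch
  // (illustration only; the helper name is ours):
#if 0
  #include <stdint.h>
  static uint64_t next_carry_model(uint64_t carry2, uint64_t tmp3) {
    return (carry2 << 32) | (tmp3 >> 32);   // (carry2:tmp3) >> 32, truncated to 64 bits
  }
#endif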
extr(carry, carry2, tmp3, 32); 2467 2468 bind(L_post_third_loop_done); 2469 } 2470 2471 /** 2472 * Code for BigInteger::multiplyToLen() instrinsic. 2473 * 2474 * r0: x 2475 * r1: xlen 2476 * r2: y 2477 * r3: ylen 2478 * r4: z 2479 * r5: zlen 2480 * r10: tmp1 2481 * r11: tmp2 2482 * r12: tmp3 2483 * r13: tmp4 2484 * r14: tmp5 2485 * r15: tmp6 2486 * r16: tmp7 2487 * 2488 */ 2489 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 2490 Register z, Register zlen, 2491 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 2492 Register tmp5, Register tmp6, Register product_hi) { 2493 2494 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6); 2495 2496 const Register idx = tmp1; 2497 const Register kdx = tmp2; 2498 const Register xstart = tmp3; 2499 2500 const Register y_idx = tmp4; 2501 const Register carry = tmp5; 2502 const Register product = xlen; 2503 const Register x_xstart = zlen; // reuse register 2504 2505 // First Loop. 2506 // 2507 // final static long LONG_MASK = 0xffffffffL; 2508 // int xstart = xlen - 1; 2509 // int ystart = ylen - 1; 2510 // long carry = 0; 2511 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 2512 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 2513 // z[kdx] = (int)product; 2514 // carry = product >>> 32; 2515 // } 2516 // z[xstart] = (int)carry; 2517 // 2518 2519 movw(idx, ylen); // idx = ylen; 2520 movw(kdx, zlen); // kdx = xlen+ylen; 2521 mov(carry, zr); // carry = 0; 2522 2523 Label L_done; 2524 2525 movw(xstart, xlen); 2526 subsw(xstart, xstart, 1); 2527 br(Assembler::MI, L_done); 2528 2529 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 2530 2531 Label L_second_loop; 2532 cbzw(kdx, L_second_loop); 2533 2534 Label L_carry; 2535 subw(kdx, kdx, 1); 2536 cbzw(kdx, L_carry); 2537 2538 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 2539 lsr(carry, carry, 32); 2540 subw(kdx, kdx, 1); 2541 2542 bind(L_carry); 2543 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 2544 2545 // Second and third (nested) loops. 
2546 // 2547 // for (int i = xstart-1; i >= 0; i--) { // Second loop 2548 // carry = 0; 2549 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 2550 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 2551 // (z[k] & LONG_MASK) + carry; 2552 // z[k] = (int)product; 2553 // carry = product >>> 32; 2554 // } 2555 // z[i] = (int)carry; 2556 // } 2557 // 2558 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 2559 2560 const Register jdx = tmp1; 2561 2562 bind(L_second_loop); 2563 mov(carry, zr); // carry = 0; 2564 movw(jdx, ylen); // j = ystart+1 2565 2566 subsw(xstart, xstart, 1); // i = xstart-1; 2567 br(Assembler::MI, L_done); 2568 2569 str(z, Address(pre(sp, -4 * wordSize))); 2570 2571 Label L_last_x; 2572 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 2573 subsw(xstart, xstart, 1); // i = xstart-1; 2574 br(Assembler::MI, L_last_x); 2575 2576 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 2577 ldr(product_hi, Address(rscratch1)); 2578 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 2579 2580 Label L_third_loop_prologue; 2581 bind(L_third_loop_prologue); 2582 2583 str(ylen, Address(sp, wordSize)); 2584 stp(x, xstart, Address(sp, 2 * wordSize)); 2585 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 2586 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 2587 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 2588 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 2589 2590 addw(tmp3, xlen, 1); 2591 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2592 subsw(tmp3, tmp3, 1); 2593 br(Assembler::MI, L_done); 2594 2595 lsr(carry, carry, 32); 2596 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2597 b(L_second_loop); 2598 2599 // Next infrequent code is moved outside loops. 2600 bind(L_last_x); 2601 ldrw(product_hi, Address(x, 0)); 2602 b(L_third_loop_prologue); 2603 2604 bind(L_done); 2605 } 2606 2607 /** 2608 * Emits code to update CRC-32 with a byte value according to constants in table 2609 * 2610 * @param [in,out]crc Register containing the crc. 2611 * @param [in]val Register containing the byte to fold into the CRC. 2612 * @param [in]table Register containing the table of crc constants. 2613 * 2614 * uint32_t crc; 2615 * val = crc_table[(val ^ crc) & 0xFF]; 2616 * crc = val ^ (crc >> 8); 2617 * 2618 */ 2619 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 2620 eor(val, val, crc); 2621 andr(val, val, 0xff); 2622 ldrw(val, Address(table, val, Address::lsl(2))); 2623 eor(crc, val, crc, Assembler::LSR, 8); 2624 } 2625 2626 /** 2627 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 2628 * 2629 * @param [in,out]crc Register containing the crc. 2630 * @param [in]v Register containing the 32-bit to fold into the CRC. 2631 * @param [in]table0 Register containing table 0 of crc constants. 2632 * @param [in]table1 Register containing table 1 of crc constants. 2633 * @param [in]table2 Register containing table 2 of crc constants. 2634 * @param [in]table3 Register containing table 3 of crc constants. 2635 * 2636 * uint32_t crc; 2637 * v = crc ^ v 2638 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 2639 * 2640 */ 2641 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 2642 Register table0, Register table1, Register table2, Register table3, 2643 bool upper) { 2644 eor(v, crc, v, upper ? 
LSR:LSL, upper ? 32:0); 2645 uxtb(tmp, v); 2646 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 2647 ubfx(tmp, v, 8, 8); 2648 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 2649 eor(crc, crc, tmp); 2650 ubfx(tmp, v, 16, 8); 2651 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 2652 eor(crc, crc, tmp); 2653 ubfx(tmp, v, 24, 8); 2654 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 2655 eor(crc, crc, tmp); 2656 } 2657 2658 /** 2659 * @param crc register containing existing CRC (32-bit) 2660 * @param buf register pointing to input byte buffer (byte*) 2661 * @param len register containing number of bytes 2662 * @param table register that will contain address of CRC table 2663 * @param tmp scratch register 2664 */ 2665 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 2666 Register table0, Register table1, Register table2, Register table3, 2667 Register tmp, Register tmp2, Register tmp3) { 2668 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 2669 unsigned long offset; 2670 2671 ornw(crc, zr, crc); 2672 2673 if (UseCRC32) { 2674 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2675 2676 subs(len, len, 64); 2677 br(Assembler::GE, CRC_by64_loop); 2678 adds(len, len, 64-4); 2679 br(Assembler::GE, CRC_by4_loop); 2680 adds(len, len, 4); 2681 br(Assembler::GT, CRC_by1_loop); 2682 b(L_exit); 2683 2684 BIND(CRC_by4_loop); 2685 ldrw(tmp, Address(post(buf, 4))); 2686 subs(len, len, 4); 2687 crc32w(crc, crc, tmp); 2688 br(Assembler::GE, CRC_by4_loop); 2689 adds(len, len, 4); 2690 br(Assembler::LE, L_exit); 2691 BIND(CRC_by1_loop); 2692 ldrb(tmp, Address(post(buf, 1))); 2693 subs(len, len, 1); 2694 crc32b(crc, crc, tmp); 2695 br(Assembler::GT, CRC_by1_loop); 2696 b(L_exit); 2697 2698 align(CodeEntryAlignment); 2699 BIND(CRC_by64_loop); 2700 subs(len, len, 64); 2701 ldp(tmp, tmp3, Address(post(buf, 16))); 2702 crc32x(crc, crc, tmp); 2703 crc32x(crc, crc, tmp3); 2704 ldp(tmp, tmp3, Address(post(buf, 16))); 2705 crc32x(crc, crc, tmp); 2706 crc32x(crc, crc, tmp3); 2707 ldp(tmp, tmp3, Address(post(buf, 16))); 2708 crc32x(crc, crc, tmp); 2709 crc32x(crc, crc, tmp3); 2710 ldp(tmp, tmp3, Address(post(buf, 16))); 2711 crc32x(crc, crc, tmp); 2712 crc32x(crc, crc, tmp3); 2713 br(Assembler::GE, CRC_by64_loop); 2714 adds(len, len, 64-4); 2715 br(Assembler::GE, CRC_by4_loop); 2716 adds(len, len, 4); 2717 br(Assembler::GT, CRC_by1_loop); 2718 BIND(L_exit); 2719 ornw(crc, zr, crc); 2720 return; 2721 } 2722 2723 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2724 if (offset) add(table0, table0, offset); 2725 add(table1, table0, 1*256*sizeof(juint)); 2726 add(table2, table0, 2*256*sizeof(juint)); 2727 add(table3, table0, 3*256*sizeof(juint)); 2728 2729 if (UseNeon) { 2730 cmp(len, 64); 2731 br(Assembler::LT, L_by16); 2732 eor(v16, T16B, v16, v16); 2733 2734 Label L_fold; 2735 2736 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 2737 2738 ld1(v0, v1, T2D, post(buf, 32)); 2739 ld1r(v4, T2D, post(tmp, 8)); 2740 ld1r(v5, T2D, post(tmp, 8)); 2741 ld1r(v6, T2D, post(tmp, 8)); 2742 ld1r(v7, T2D, post(tmp, 8)); 2743 mov(v16, T4S, 0, crc); 2744 2745 eor(v0, T16B, v0, v16); 2746 sub(len, len, 64); 2747 2748 BIND(L_fold); 2749 pmull(v22, T8H, v0, v5, T8B); 2750 pmull(v20, T8H, v0, v7, T8B); 2751 pmull(v23, T8H, v0, v4, T8B); 2752 pmull(v21, T8H, v0, v6, T8B); 2753 2754 pmull2(v18, T8H, v0, v5, T16B); 2755 pmull2(v16, T8H, v0, v7, T16B); 2756 pmull2(v19, T8H, v0, v4, T16B); 2757 pmull2(v17, T8H, v0, v6, T16B); 2758 2759 uzp1(v24, v20, 
v22, T8H); 2760 uzp2(v25, v20, v22, T8H); 2761 eor(v20, T16B, v24, v25); 2762 2763 uzp1(v26, v16, v18, T8H); 2764 uzp2(v27, v16, v18, T8H); 2765 eor(v16, T16B, v26, v27); 2766 2767 ushll2(v22, T4S, v20, T8H, 8); 2768 ushll(v20, T4S, v20, T4H, 8); 2769 2770 ushll2(v18, T4S, v16, T8H, 8); 2771 ushll(v16, T4S, v16, T4H, 8); 2772 2773 eor(v22, T16B, v23, v22); 2774 eor(v18, T16B, v19, v18); 2775 eor(v20, T16B, v21, v20); 2776 eor(v16, T16B, v17, v16); 2777 2778 uzp1(v17, v16, v20, T2D); 2779 uzp2(v21, v16, v20, T2D); 2780 eor(v17, T16B, v17, v21); 2781 2782 ushll2(v20, T2D, v17, T4S, 16); 2783 ushll(v16, T2D, v17, T2S, 16); 2784 2785 eor(v20, T16B, v20, v22); 2786 eor(v16, T16B, v16, v18); 2787 2788 uzp1(v17, v20, v16, T2D); 2789 uzp2(v21, v20, v16, T2D); 2790 eor(v28, T16B, v17, v21); 2791 2792 pmull(v22, T8H, v1, v5, T8B); 2793 pmull(v20, T8H, v1, v7, T8B); 2794 pmull(v23, T8H, v1, v4, T8B); 2795 pmull(v21, T8H, v1, v6, T8B); 2796 2797 pmull2(v18, T8H, v1, v5, T16B); 2798 pmull2(v16, T8H, v1, v7, T16B); 2799 pmull2(v19, T8H, v1, v4, T16B); 2800 pmull2(v17, T8H, v1, v6, T16B); 2801 2802 ld1(v0, v1, T2D, post(buf, 32)); 2803 2804 uzp1(v24, v20, v22, T8H); 2805 uzp2(v25, v20, v22, T8H); 2806 eor(v20, T16B, v24, v25); 2807 2808 uzp1(v26, v16, v18, T8H); 2809 uzp2(v27, v16, v18, T8H); 2810 eor(v16, T16B, v26, v27); 2811 2812 ushll2(v22, T4S, v20, T8H, 8); 2813 ushll(v20, T4S, v20, T4H, 8); 2814 2815 ushll2(v18, T4S, v16, T8H, 8); 2816 ushll(v16, T4S, v16, T4H, 8); 2817 2818 eor(v22, T16B, v23, v22); 2819 eor(v18, T16B, v19, v18); 2820 eor(v20, T16B, v21, v20); 2821 eor(v16, T16B, v17, v16); 2822 2823 uzp1(v17, v16, v20, T2D); 2824 uzp2(v21, v16, v20, T2D); 2825 eor(v16, T16B, v17, v21); 2826 2827 ushll2(v20, T2D, v16, T4S, 16); 2828 ushll(v16, T2D, v16, T2S, 16); 2829 2830 eor(v20, T16B, v22, v20); 2831 eor(v16, T16B, v16, v18); 2832 2833 uzp1(v17, v20, v16, T2D); 2834 uzp2(v21, v20, v16, T2D); 2835 eor(v20, T16B, v17, v21); 2836 2837 shl(v16, T2D, v28, 1); 2838 shl(v17, T2D, v20, 1); 2839 2840 eor(v0, T16B, v0, v16); 2841 eor(v1, T16B, v1, v17); 2842 2843 subs(len, len, 32); 2844 br(Assembler::GE, L_fold); 2845 2846 mov(crc, 0); 2847 mov(tmp, v0, T1D, 0); 2848 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2849 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2850 mov(tmp, v0, T1D, 1); 2851 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2852 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2853 mov(tmp, v1, T1D, 0); 2854 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2855 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2856 mov(tmp, v1, T1D, 1); 2857 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2858 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2859 2860 add(len, len, 32); 2861 } 2862 2863 BIND(L_by16); 2864 subs(len, len, 16); 2865 br(Assembler::GE, L_by16_loop); 2866 adds(len, len, 16-4); 2867 br(Assembler::GE, L_by4_loop); 2868 adds(len, len, 4); 2869 br(Assembler::GT, L_by1_loop); 2870 b(L_exit); 2871 2872 BIND(L_by4_loop); 2873 ldrw(tmp, Address(post(buf, 4))); 2874 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 2875 subs(len, len, 4); 2876 br(Assembler::GE, L_by4_loop); 2877 adds(len, len, 4); 2878 br(Assembler::LE, L_exit); 2879 BIND(L_by1_loop); 2880 subs(len, len, 1); 2881 ldrb(tmp, Address(post(buf, 1))); 2882 update_byte_crc32(crc, tmp, table0); 2883 
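  // This byte-at-a-time loop is the classic table-driven CRC-32 update. As a
  // C sketch (illustration only; the helper name is ours). Note that
  // kernel_crc32 also inverts crc on entry and on exit via ornw, which is not
  // shown here:
#if 0
  #include <stdint.h>
  #include <stddef.h>
  static uint32_t crc32_by1_model(uint32_t crc, const uint8_t *buf, size_t len,
                                  const uint32_t table[256]) {
    while (len--)
      crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
    return crc;
  }
#endif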
br(Assembler::GT, L_by1_loop); 2884 b(L_exit); 2885 2886 align(CodeEntryAlignment); 2887 BIND(L_by16_loop); 2888 subs(len, len, 16); 2889 ldp(tmp, tmp3, Address(post(buf, 16))); 2890 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2891 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2892 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 2893 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 2894 br(Assembler::GE, L_by16_loop); 2895 adds(len, len, 16-4); 2896 br(Assembler::GE, L_by4_loop); 2897 adds(len, len, 4); 2898 br(Assembler::GT, L_by1_loop); 2899 BIND(L_exit); 2900 ornw(crc, zr, crc); 2901 } 2902 2903 SkipIfEqual::SkipIfEqual( 2904 MacroAssembler* masm, const bool* flag_addr, bool value) { 2905 _masm = masm; 2906 unsigned long offset; 2907 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 2908 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 2909 _masm->cbzw(rscratch1, _label); 2910 } 2911 2912 SkipIfEqual::~SkipIfEqual() { 2913 _masm->bind(_label); 2914 } 2915 2916 void MacroAssembler::cmpptr(Register src1, Address src2) { 2917 unsigned long offset; 2918 adrp(rscratch1, src2, offset); 2919 ldr(rscratch1, Address(rscratch1, offset)); 2920 cmp(src1, rscratch1); 2921 } 2922 2923 void MacroAssembler::store_check(Register obj) { 2924 // Does a store check for the oop in register obj. The content of 2925 // register obj is destroyed afterwards. 2926 store_check_part_1(obj); 2927 store_check_part_2(obj); 2928 } 2929 2930 void MacroAssembler::store_check(Register obj, Address dst) { 2931 store_check(obj); 2932 } 2933 2934 2935 // split the store check operation so that other instructions can be scheduled inbetween 2936 void MacroAssembler::store_check_part_1(Register obj) { 2937 BarrierSet* bs = Universe::heap()->barrier_set(); 2938 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); 2939 lsr(obj, obj, CardTableModRefBS::card_shift); 2940 } 2941 2942 void MacroAssembler::store_check_part_2(Register obj) { 2943 BarrierSet* bs = Universe::heap()->barrier_set(); 2944 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); 2945 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 2946 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 2947 2948 // The calculation for byte_map_base is as follows: 2949 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); 2950 // So this essentially converts an address to a displacement and 2951 // it will never need to be relocated. 2952 2953 // FIXME: It's not likely that disp will fit into an offset so we 2954 // don't bother to check, but it could save an instruction. 
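  // Net effect of the two store_check parts, as a C sketch (illustration
  // only; the helper name is ours). The card value 0 is the dirty value, as
  // asserted elsewhere in this file.
#if 0
  #include <stdint.h>
  static void store_check_model(int8_t *byte_map_base, uintptr_t store_addr,
                                int card_shift) {
    byte_map_base[store_addr >> card_shift] = 0;   // mark the card dirty
  }
#endif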
2955 intptr_t disp = (intptr_t) ct->byte_map_base; 2956 mov(rscratch1, disp); 2957 strb(zr, Address(obj, rscratch1)); 2958 } 2959 2960 void MacroAssembler::load_klass(Register dst, Register src) { 2961 if (UseCompressedClassPointers) { 2962 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 2963 decode_klass_not_null(dst); 2964 } else { 2965 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 2966 } 2967 } 2968 2969 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 2970 if (UseCompressedClassPointers) { 2971 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 2972 if (Universe::narrow_klass_base() == NULL) { 2973 cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift()); 2974 return; 2975 } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 2976 && Universe::narrow_klass_shift() == 0) { 2977 // Only the bottom 32 bits matter 2978 cmpw(trial_klass, tmp); 2979 return; 2980 } 2981 decode_klass_not_null(tmp); 2982 } else { 2983 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 2984 } 2985 cmp(trial_klass, tmp); 2986 } 2987 2988 void MacroAssembler::load_prototype_header(Register dst, Register src) { 2989 load_klass(dst, src); 2990 ldr(dst, Address(dst, Klass::prototype_header_offset())); 2991 } 2992 2993 void MacroAssembler::store_klass(Register dst, Register src) { 2994 // FIXME: Should this be a store release? concurrent gcs assumes 2995 // klass length is valid if klass field is not null. 2996 if (UseCompressedClassPointers) { 2997 encode_klass_not_null(src); 2998 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 2999 } else { 3000 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 3001 } 3002 } 3003 3004 void MacroAssembler::store_klass_gap(Register dst, Register src) { 3005 if (UseCompressedClassPointers) { 3006 // Store to klass gap in destination 3007 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 3008 } 3009 } 3010 3011 // Algorithm must match oop.inline.hpp encode_heap_oop. 3012 void MacroAssembler::encode_heap_oop(Register d, Register s) { 3013 #ifdef ASSERT 3014 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 3015 #endif 3016 verify_oop(s, "broken oop in encode_heap_oop"); 3017 if (Universe::narrow_oop_base() == NULL) { 3018 if (Universe::narrow_oop_shift() != 0) { 3019 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3020 lsr(d, s, LogMinObjAlignmentInBytes); 3021 } else { 3022 mov(d, s); 3023 } 3024 } else { 3025 subs(d, s, rheapbase); 3026 csel(d, d, zr, Assembler::HS); 3027 lsr(d, d, LogMinObjAlignmentInBytes); 3028 3029 /* Old algorithm: is this any worse? 
3030 Label nonnull; 3031 cbnz(r, nonnull); 3032 sub(r, r, rheapbase); 3033 bind(nonnull); 3034 lsr(r, r, LogMinObjAlignmentInBytes); 3035 */ 3036 } 3037 } 3038 3039 void MacroAssembler::encode_heap_oop_not_null(Register r) { 3040 #ifdef ASSERT 3041 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 3042 if (CheckCompressedOops) { 3043 Label ok; 3044 cbnz(r, ok); 3045 stop("null oop passed to encode_heap_oop_not_null"); 3046 bind(ok); 3047 } 3048 #endif 3049 verify_oop(r, "broken oop in encode_heap_oop_not_null"); 3050 if (Universe::narrow_oop_base() != NULL) { 3051 sub(r, r, rheapbase); 3052 } 3053 if (Universe::narrow_oop_shift() != 0) { 3054 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3055 lsr(r, r, LogMinObjAlignmentInBytes); 3056 } 3057 } 3058 3059 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 3060 #ifdef ASSERT 3061 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 3062 if (CheckCompressedOops) { 3063 Label ok; 3064 cbnz(src, ok); 3065 stop("null oop passed to encode_heap_oop_not_null2"); 3066 bind(ok); 3067 } 3068 #endif 3069 verify_oop(src, "broken oop in encode_heap_oop_not_null2"); 3070 3071 Register data = src; 3072 if (Universe::narrow_oop_base() != NULL) { 3073 sub(dst, src, rheapbase); 3074 data = dst; 3075 } 3076 if (Universe::narrow_oop_shift() != 0) { 3077 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3078 lsr(dst, data, LogMinObjAlignmentInBytes); 3079 data = dst; 3080 } 3081 if (data == src) 3082 mov(dst, src); 3083 } 3084 3085 void MacroAssembler::decode_heap_oop(Register d, Register s) { 3086 #ifdef ASSERT 3087 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 3088 #endif 3089 if (Universe::narrow_oop_base() == NULL) { 3090 if (Universe::narrow_oop_shift() != 0 || d != s) { 3091 lsl(d, s, Universe::narrow_oop_shift()); 3092 } 3093 } else { 3094 Label done; 3095 if (d != s) 3096 mov(d, s); 3097 cbz(s, done); 3098 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 3099 bind(done); 3100 } 3101 verify_oop(d, "broken oop in decode_heap_oop"); 3102 } 3103 3104 void MacroAssembler::decode_heap_oop_not_null(Register r) { 3105 assert (UseCompressedOops, "should only be used for compressed headers"); 3106 assert (Universe::heap() != NULL, "java heap should be initialized"); 3107 // Cannot assert, unverified entry point counts instructions (see .ad file) 3108 // vtableStubs also counts instructions in pd_code_size_limit. 3109 // Also do not verify_oop as this is called by verify_oop. 3110 if (Universe::narrow_oop_shift() != 0) { 3111 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3112 if (Universe::narrow_oop_base() != NULL) { 3113 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3114 } else { 3115 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3116 } 3117 } else { 3118 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3119 } 3120 } 3121 3122 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 3123 assert (UseCompressedOops, "should only be used for compressed headers"); 3124 assert (Universe::heap() != NULL, "java heap should be initialized"); 3125 // Cannot assert, unverified entry point counts instructions (see .ad file) 3126 // vtableStubs also counts instructions in pd_code_size_limit. 3127 // Also do not verify_oop as this is called by verify_oop. 
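  // The compressed-oop arithmetic implemented by encode_heap_oop and the
  // decode routines, as a C sketch (illustration only; the helper names are
  // ours). The *_not_null variants simply omit the narrow != 0 test.
#if 0
  #include <stdint.h>
  static uint32_t encode_oop_model(uint64_t oop, uint64_t base, int shift) {
    if (base == 0) return (uint32_t)(oop >> shift);
    uint64_t biased = (oop >= base) ? oop - base : 0;   // keeps NULL as 0
    return (uint32_t)(biased >> shift);
  }
  static uint64_t decode_oop_model(uint32_t narrow, uint64_t base, int shift) {
    if (base == 0) return (uint64_t)narrow << shift;
    return narrow ? base + ((uint64_t)narrow << shift) : 0;
  }
#endif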
3128 if (Universe::narrow_oop_shift() != 0) { 3129 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3130 if (Universe::narrow_oop_base() != NULL) { 3131 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3132 } else { 3133 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3134 } 3135 } else { 3136 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3137 if (dst != src) { 3138 mov(dst, src); 3139 } 3140 } 3141 } 3142 3143 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3144 if (Universe::narrow_klass_base() == NULL) { 3145 if (Universe::narrow_klass_shift() != 0) { 3146 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3147 lsr(dst, src, LogKlassAlignmentInBytes); 3148 } else { 3149 if (dst != src) mov(dst, src); 3150 } 3151 return; 3152 } 3153 3154 if (use_XOR_for_compressed_class_base) { 3155 if (Universe::narrow_klass_shift() != 0) { 3156 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3157 lsr(dst, dst, LogKlassAlignmentInBytes); 3158 } else { 3159 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3160 } 3161 return; 3162 } 3163 3164 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3165 && Universe::narrow_klass_shift() == 0) { 3166 movw(dst, src); 3167 return; 3168 } 3169 3170 #ifdef ASSERT 3171 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 3172 #endif 3173 3174 Register rbase = dst; 3175 if (dst == src) rbase = rheapbase; 3176 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3177 sub(dst, src, rbase); 3178 if (Universe::narrow_klass_shift() != 0) { 3179 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3180 lsr(dst, dst, LogKlassAlignmentInBytes); 3181 } 3182 if (dst == src) reinit_heapbase(); 3183 } 3184 3185 void MacroAssembler::encode_klass_not_null(Register r) { 3186 encode_klass_not_null(r, r); 3187 } 3188 3189 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3190 Register rbase = dst; 3191 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3192 3193 if (Universe::narrow_klass_base() == NULL) { 3194 if (Universe::narrow_klass_shift() != 0) { 3195 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3196 lsl(dst, src, LogKlassAlignmentInBytes); 3197 } else { 3198 if (dst != src) mov(dst, src); 3199 } 3200 return; 3201 } 3202 3203 if (use_XOR_for_compressed_class_base) { 3204 if (Universe::narrow_klass_shift() != 0) { 3205 lsl(dst, src, LogKlassAlignmentInBytes); 3206 eor(dst, dst, (uint64_t)Universe::narrow_klass_base()); 3207 } else { 3208 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3209 } 3210 return; 3211 } 3212 3213 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3214 && Universe::narrow_klass_shift() == 0) { 3215 if (dst != src) 3216 movw(dst, src); 3217 movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32); 3218 return; 3219 } 3220 3221 // Cannot assert, unverified entry point counts instructions (see .ad file) 3222 // vtableStubs also counts instructions in pd_code_size_limit. 3223 // Also do not verify_oop as this is called by verify_oop. 
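  // General-path decode of a narrow klass, plus the earlier eor shortcut, as
  // a C sketch (illustration only; the helper names are ours). The xor form
  // is only equivalent to base + (narrow << shift) when the base and the
  // shifted value never have a set bit in common (a + b == a ^ b when
  // (a & b) == 0), which is presumably what use_XOR_for_compressed_class_base
  // guarantees about the chosen base.
#if 0
  #include <stdint.h>
  static uint64_t decode_klass_model(uint32_t narrow, uint64_t base, int shift) {
    return base + ((uint64_t)narrow << shift);
  }
  static uint64_t decode_klass_xor_model(uint32_t narrow, uint64_t base, int shift) {
    return base ^ ((uint64_t)narrow << shift);   // valid only under the disjoint-bits assumption
  }
#endif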
3224 if (dst == src) rbase = rheapbase; 3225 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3226 if (Universe::narrow_klass_shift() != 0) { 3227 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3228 add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); 3229 } else { 3230 add(dst, rbase, src); 3231 } 3232 if (dst == src) reinit_heapbase(); 3233 } 3234 3235 void MacroAssembler::decode_klass_not_null(Register r) { 3236 decode_klass_not_null(r, r); 3237 } 3238 3239 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 3240 assert (UseCompressedOops, "should only be used for compressed oops"); 3241 assert (Universe::heap() != NULL, "java heap should be initialized"); 3242 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3243 3244 int oop_index = oop_recorder()->find_index(obj); 3245 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3246 3247 InstructionMark im(this); 3248 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3249 code_section()->relocate(inst_mark(), rspec); 3250 movz(dst, 0xDEAD, 16); 3251 movk(dst, 0xBEEF); 3252 } 3253 3254 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 3255 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3256 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3257 int index = oop_recorder()->find_index(k); 3258 assert(! Universe::heap()->is_in_reserved(k), "should not be an oop"); 3259 3260 InstructionMark im(this); 3261 RelocationHolder rspec = metadata_Relocation::spec(index); 3262 code_section()->relocate(inst_mark(), rspec); 3263 narrowKlass nk = Klass::encode_klass(k); 3264 movz(dst, (nk >> 16), 16); 3265 movk(dst, nk & 0xffff); 3266 } 3267 3268 void MacroAssembler::load_heap_oop(Register dst, Address src) 3269 { 3270 if (UseCompressedOops) { 3271 ldrw(dst, src); 3272 decode_heap_oop(dst); 3273 } else { 3274 ldr(dst, src); 3275 } 3276 } 3277 3278 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) 3279 { 3280 if (UseCompressedOops) { 3281 ldrw(dst, src); 3282 decode_heap_oop_not_null(dst); 3283 } else { 3284 ldr(dst, src); 3285 } 3286 } 3287 3288 void MacroAssembler::store_heap_oop(Address dst, Register src) { 3289 if (UseCompressedOops) { 3290 assert(!dst.uses(src), "not enough registers"); 3291 encode_heap_oop(src); 3292 strw(src, dst); 3293 } else 3294 str(src, dst); 3295 } 3296 3297 // Used for storing NULLs. 3298 void MacroAssembler::store_heap_oop_null(Address dst) { 3299 if (UseCompressedOops) { 3300 strw(zr, dst); 3301 } else 3302 str(zr, dst); 3303 } 3304 3305 #if INCLUDE_ALL_GCS 3306 void MacroAssembler::g1_write_barrier_pre(Register obj, 3307 Register pre_val, 3308 Register thread, 3309 Register tmp, 3310 bool tosca_live, 3311 bool expand_call) { 3312 // If expand_call is true then we expand the call_VM_leaf macro 3313 // directly to skip generating the check by 3314 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 
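  // SATB pre-barrier logic generated below, as a C sketch (illustration
  // only; runtime_g1_wb_pre is a hypothetical stand-in for the slow-path call
  // to SharedRuntime::g1_wb_pre):
#if 0
  #include <stddef.h>
  extern void runtime_g1_wb_pre(void *pre_val /*, thread */);   // stand-in only
  static void g1_pre_barrier_model(void **obj, void *pre_val, int marking_active,
                                   size_t *index, char *buffer) {
    if (!marking_active) return;              // marking not in progress
    if (obj != NULL) pre_val = *obj;          // load the previous value
    if (pre_val == NULL) return;              // nothing to record
    if (*index == 0) {                        // thread-local buffer is full
      runtime_g1_wb_pre(pre_val);
      return;
    }
    *index -= sizeof(void *);
    *(void **)(buffer + *index) = pre_val;    // record the previous value
  }
#endif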

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg)
    assert_different_registers(obj, pre_val, tmp);

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    ldrb(tmp, in_progress);
  }
  cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ldr(tmp, index);                      // tmp := *index_adr
  cbz(tmp, runtime);                    // tmp == 0?
                                        // If yes, goto runtime

  sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  str(tmp, index);                      // *index_adr := tmp
  ldr(rscratch1, buffer);
  add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  str(pre_val, Address(tmp, 0));
  b(done);

  bind(runtime);
  // save the live input values
  push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp + frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
3383 3384 if (expand_call) { 3385 assert(pre_val != c_rarg1, "smashed arg"); 3386 pass_arg1(this, thread); 3387 pass_arg0(this, pre_val); 3388 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2); 3389 } else { 3390 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread); 3391 } 3392 3393 pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp); 3394 3395 bind(done); 3396 } 3397 3398 void MacroAssembler::g1_write_barrier_post(Register store_addr, 3399 Register new_val, 3400 Register thread, 3401 Register tmp, 3402 Register tmp2) { 3403 assert(thread == rthread, "must be"); 3404 3405 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + 3406 PtrQueue::byte_offset_of_index())); 3407 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + 3408 PtrQueue::byte_offset_of_buf())); 3409 3410 BarrierSet* bs = Universe::heap()->barrier_set(); 3411 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 3412 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 3413 3414 Label done; 3415 Label runtime; 3416 3417 // Does store cross heap regions? 3418 3419 eor(tmp, store_addr, new_val); 3420 lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes); 3421 cbz(tmp, done); 3422 3423 // crosses regions, storing NULL? 3424 3425 cbz(new_val, done); 3426 3427 // storing region crossing non-NULL, is card already dirty? 3428 3429 ExternalAddress cardtable((address) ct->byte_map_base); 3430 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 3431 const Register card_addr = tmp; 3432 3433 lsr(card_addr, store_addr, CardTableModRefBS::card_shift); 3434 3435 unsigned long offset; 3436 adrp(tmp2, cardtable, offset); 3437 3438 // get the address of the card 3439 add(card_addr, card_addr, tmp2); 3440 ldrb(tmp2, Address(card_addr, offset)); 3441 cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val()); 3442 br(Assembler::EQ, done); 3443 3444 assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0"); 3445 3446 membar(Assembler::StoreLoad); 3447 3448 ldrb(tmp2, Address(card_addr, offset)); 3449 cbzw(tmp2, done); 3450 3451 // storing a region crossing, non-NULL oop, card is clean. 3452 // dirty card and log. 3453 3454 strb(zr, Address(card_addr, offset)); 3455 3456 ldr(rscratch1, queue_index); 3457 cbz(rscratch1, runtime); 3458 sub(rscratch1, rscratch1, wordSize); 3459 str(rscratch1, queue_index); 3460 3461 ldr(tmp2, buffer); 3462 str(card_addr, Address(tmp2, rscratch1)); 3463 b(done); 3464 3465 bind(runtime); 3466 // save the live input values 3467 push(store_addr->bit(true) | new_val->bit(true), sp); 3468 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); 3469 pop(store_addr->bit(true) | new_val->bit(true), sp); 3470 3471 bind(done); 3472 } 3473 3474 #endif // INCLUDE_ALL_GCS 3475 3476 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 3477 assert(oop_recorder() != NULL, "this assembler needs a Recorder"); 3478 int index = oop_recorder()->allocate_metadata_index(obj); 3479 RelocationHolder rspec = metadata_Relocation::spec(index); 3480 return Address((address)obj, rspec); 3481 } 3482 3483 // Move an oop into a register. immediate is true if we want 3484 // immediate instrcutions, i.e. we are not going to patch this 3485 // instruction while the code is being executed by another thread. In 3486 // that case we can use move immediates rather than the constant pool. 
3487 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) { 3488 int oop_index; 3489 if (obj == NULL) { 3490 oop_index = oop_recorder()->allocate_oop_index(obj); 3491 } else { 3492 oop_index = oop_recorder()->find_index(obj); 3493 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3494 } 3495 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3496 if (! immediate) { 3497 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 3498 ldr_constant(dst, Address(dummy, rspec)); 3499 } else 3500 mov(dst, Address((address)obj, rspec)); 3501 } 3502 3503 // Move a metadata address into a register. 3504 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 3505 int oop_index; 3506 if (obj == NULL) { 3507 oop_index = oop_recorder()->allocate_metadata_index(obj); 3508 } else { 3509 oop_index = oop_recorder()->find_index(obj); 3510 } 3511 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 3512 mov(dst, Address((address)obj, rspec)); 3513 } 3514 3515 Address MacroAssembler::constant_oop_address(jobject obj) { 3516 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3517 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop"); 3518 int oop_index = oop_recorder()->find_index(obj); 3519 return Address((address)obj, oop_Relocation::spec(oop_index)); 3520 } 3521 3522 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3523 void MacroAssembler::tlab_allocate(Register obj, 3524 Register var_size_in_bytes, 3525 int con_size_in_bytes, 3526 Register t1, 3527 Register t2, 3528 Label& slow_case) { 3529 assert_different_registers(obj, t2); 3530 assert_different_registers(obj, var_size_in_bytes); 3531 Register end = t2; 3532 3533 // verify_tlab(); 3534 3535 ldr(obj, Address(rthread, JavaThread::tlab_top_offset())); 3536 if (var_size_in_bytes == noreg) { 3537 lea(end, Address(obj, con_size_in_bytes)); 3538 } else { 3539 lea(end, Address(obj, var_size_in_bytes)); 3540 } 3541 ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset())); 3542 cmp(end, rscratch1); 3543 br(Assembler::HI, slow_case); 3544 3545 // update the tlab top pointer 3546 str(end, Address(rthread, JavaThread::tlab_top_offset())); 3547 3548 // recover var_size_in_bytes if necessary 3549 if (var_size_in_bytes == end) { 3550 sub(var_size_in_bytes, var_size_in_bytes, obj); 3551 } 3552 // verify_tlab(); 3553 } 3554 3555 // Preserves r19, and r3. 3556 Register MacroAssembler::tlab_refill(Label& retry, 3557 Label& try_eden, 3558 Label& slow_case) { 3559 Register top = r0; 3560 Register t1 = r2; 3561 Register t2 = r4; 3562 assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3); 3563 Label do_refill, discard_tlab; 3564 3565 if (!Universe::heap()->supports_inline_contig_alloc()) { 3566 // No allocation in the shared eden. 3567 b(slow_case); 3568 } 3569 3570 ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 3571 ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 3572 3573 // calculate amount of free space 3574 sub(t1, t1, top); 3575 lsr(t1, t1, LogHeapWordSize); 3576 3577 // Retain tlab and allocate object in shared space if 3578 // the amount free in the tlab is too large to discard. 
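// The retain-vs-discard decision below, sketched as pseudocode (this mirrors
// the emitted code; the names are shorthand for the JavaThread TLAB fields):
//
//   free_words = (tlab_end - tlab_top) >> LogHeapWordSize;
//   if (free_words > tlab_refill_waste_limit) {
//     // Too much would be wasted by discarding: keep the TLAB, raise the
//     // waste limit so a refill eventually happens, and allocate in shared eden.
//     tlab_refill_waste_limit += refill_waste_limit_increment;
//     goto try_eden;
//   }
//   // Otherwise discard: fill the unused tail with a dummy int[] so the heap
//   // stays parsable, then refill the TLAB from eden (do_refill below).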
3579 3580 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3581 cmp(t1, rscratch1); 3582 br(Assembler::LE, discard_tlab); 3583 3584 // Retain 3585 // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3586 mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment()); 3587 add(rscratch1, rscratch1, t2); 3588 str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()))); 3589 3590 if (TLABStats) { 3591 // increment number of slow_allocations 3592 addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())), 3593 1, rscratch1); 3594 } 3595 b(try_eden); 3596 3597 bind(discard_tlab); 3598 if (TLABStats) { 3599 // increment number of refills 3600 addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1, 3601 rscratch1); 3602 // accumulate wastage -- t1 is amount free in tlab 3603 addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1, 3604 rscratch1); 3605 } 3606 3607 // if tlab is currently allocated (top or end != null) then 3608 // fill [top, end + alignment_reserve) with array object 3609 cbz(top, do_refill); 3610 3611 // set up the mark word 3612 mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2)); 3613 str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes())); 3614 // set the length to the remaining space 3615 sub(t1, t1, typeArrayOopDesc::header_size(T_INT)); 3616 add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve()); 3617 lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint))); 3618 strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes())); 3619 // set klass to intArrayKlass 3620 { 3621 unsigned long offset; 3622 // dubious reloc why not an oop reloc? 3623 adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()), 3624 offset); 3625 ldr(t1, Address(rscratch1, offset)); 3626 } 3627 // store klass last. concurrent gcs assumes klass length is valid if 3628 // klass field is not null. 3629 store_klass(top, t1); 3630 3631 mov(t1, top); 3632 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 3633 sub(t1, t1, rscratch1); 3634 incr_allocated_bytes(rthread, t1, 0, rscratch1); 3635 3636 // refill the tlab with an eden allocation 3637 bind(do_refill); 3638 ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset()))); 3639 lsl(t1, t1, LogHeapWordSize); 3640 // allocate new tlab, address returned in top 3641 eden_allocate(top, t1, 0, t2, slow_case); 3642 3643 // Check that t1 was preserved in eden_allocate. 
3644 #ifdef ASSERT
3645 if (UseTLAB) {
3646 Label ok;
3647 Register tsize = r4;
3648 assert_different_registers(tsize, rthread, t1);
3649 str(tsize, Address(pre(sp, -16)));
3650 ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
3651 lsl(tsize, tsize, LogHeapWordSize);
3652 cmp(t1, tsize);
3653 br(Assembler::EQ, ok);
3654 STOP("assert(t1 != tlab size)");
3655 should_not_reach_here();
3656
3657 bind(ok);
3658 ldr(tsize, Address(post(sp, 16)));
3659 }
3660 #endif
3661 str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
3662 str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3663 add(top, top, t1);
3664 sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
3665 str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
3666 verify_tlab();
3667 b(retry);
3668
3669 return rthread; // for use by caller
3670 }
3671
3672 // Defines obj, preserves var_size_in_bytes
3673 void MacroAssembler::eden_allocate(Register obj,
3674 Register var_size_in_bytes,
3675 int con_size_in_bytes,
3676 Register t1,
3677 Label& slow_case) {
3678 assert_different_registers(obj, var_size_in_bytes, t1);
3679 if (!Universe::heap()->supports_inline_contig_alloc()) {
3680 b(slow_case);
3681 } else {
3682 Register end = t1;
3683 Register heap_end = rscratch2;
3684 Label retry;
3685 bind(retry);
3686 {
3687 unsigned long offset;
3688 adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
3689 ldr(heap_end, Address(rscratch1, offset));
3690 }
3691
3692 ExternalAddress heap_top((address) Universe::heap()->top_addr());
3693
3694 // Get the current top of the heap
3695 {
3696 unsigned long offset;
3697 adrp(rscratch1, heap_top, offset);
3698 // Use add() here after ADRP, rather than lea().
3699 // lea() does not generate anything if its offset is zero.
3700 // However, relocs expect to find either an ADD or a load/store
3701 // insn after an ADRP. add() always generates an ADD insn, even
3702 // for add(Rn, Rn, 0).
3703 add(rscratch1, rscratch1, offset);
3704 ldaxr(obj, rscratch1);
3705 }
3706
3707 // Adjust it by the size of our new object
3708 if (var_size_in_bytes == noreg) {
3709 lea(end, Address(obj, con_size_in_bytes));
3710 } else {
3711 lea(end, Address(obj, var_size_in_bytes));
3712 }
3713
3714 // if end < obj then we wrapped around high memory
3715 cmp(end, obj);
3716 br(Assembler::LO, slow_case);
3717
3718 cmp(end, heap_end);
3719 br(Assembler::HI, slow_case);
3720
3721 // If heap_top hasn't been changed by some other thread, update it.
3722 stlxr(rscratch1, end, rscratch1);
3723 cbnzw(rscratch1, retry);
3724 }
3725 }
3726
3727 void MacroAssembler::verify_tlab() {
3728 #ifdef ASSERT
3729 if (UseTLAB && VerifyOops) {
3730 Label next, ok;
3731
3732 stp(rscratch2, rscratch1, Address(pre(sp, -16)));
3733
3734 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3735 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
3736 cmp(rscratch2, rscratch1);
3737 br(Assembler::HS, next);
3738 STOP("assert(top >= start)");
3739 should_not_reach_here();
3740
3741 bind(next);
3742 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
3743 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
3744 cmp(rscratch2, rscratch1);
3745 br(Assembler::HS, ok);
3746 STOP("assert(top <= end)");
3747 should_not_reach_here();
3748
3749 bind(ok);
3750 ldp(rscratch2, rscratch1, Address(post(sp, 16)));
3751 }
3752 #endif
3753 }
3754
3755 // Writes to stack successive pages until offset reached to check for
3756 // stack overflow + shadow pages. This clobbers tmp.
3757 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
3758 assert_different_registers(tmp, size, rscratch1);
3759 mov(tmp, sp);
3760 // Bang stack for total size given plus shadow page size.
3761 // Bang one page at a time because large size can bang beyond yellow and
3762 // red zones.
3763 Label loop;
3764 mov(rscratch1, os::vm_page_size());
3765 bind(loop);
3766 lea(tmp, Address(tmp, -os::vm_page_size()));
3767 subsw(size, size, rscratch1);
3768 str(size, Address(tmp));
3769 br(Assembler::GT, loop);
3770
3771 // Bang down shadow pages too.
3772 // At this point, (tmp-0) is the last address touched, so don't
3773 // touch it again. (It was touched as (tmp-pagesize) but then tmp
3774 // was post-decremented.) Skip this address by starting at i=1, and
3775 // touch a few more pages below. N.B. It is important to touch all
3776 // the way down to and including i=StackShadowPages.
3777 for (int i = 0; i < StackShadowPages-1; i++) {
3778 // this could be any sized move but this can be a debugging crumb
3779 // so the bigger the better.
3780 lea(tmp, Address(tmp, -os::vm_page_size()));
3781 str(size, Address(tmp));
3782 }
3783 }
3784
3785
3786 address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
3787 unsigned long off;
3788 adrp(r, Address(page, rtype), off);
3789 InstructionMark im(this);
3790 code_section()->relocate(inst_mark(), rtype);
3791 ldrw(zr, Address(r, off));
3792 return inst_mark();
3793 }
3794
3795 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
3796 InstructionMark im(this);
3797 code_section()->relocate(inst_mark(), rtype);
3798 ldrw(zr, Address(r, 0));
3799 return inst_mark();
3800 }
3801
3802 void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
3803 relocInfo::relocType rtype = dest.rspec().reloc()->type();
3804 if (uabs(pc() - dest.target()) >= (1LL << 32)) {
3805 guarantee(rtype == relocInfo::none
3806 || rtype == relocInfo::external_word_type
3807 || rtype == relocInfo::poll_type
3808 || rtype == relocInfo::poll_return_type,
3809 "can only use a fixed address with an ADRP");
3810 // Out of range. This doesn't happen very often, but we have to
3811 // handle it.
3812 mov(reg1, dest);
3813 byte_offset = 0;
3814 } else {
3815 InstructionMark im(this);
3816 code_section()->relocate(inst_mark(), dest.rspec());
3817 byte_offset = (uint64_t)dest.target() & 0xfff;
3818 _adrp(reg1, dest.target());
3819 }
3820 }
3821
3822 void MacroAssembler::build_frame(int framesize) {
3823 assert(framesize > 0, "framesize must be > 0");
3824 if (framesize < ((1 << 9) + 2 * wordSize)) {
3825 sub(sp, sp, framesize);
3826 stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
3827 if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
3828 } else {
3829 stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
3830 if (PreserveFramePointer) mov(rfp, sp);
3831 if (framesize < ((1 << 12) + 2 * wordSize))
3832 sub(sp, sp, framesize - 2 * wordSize);
3833 else {
3834 mov(rscratch1, framesize - 2 * wordSize);
3835 sub(sp, sp, rscratch1);
3836 }
3837 }
3838 }
3839
3840 void MacroAssembler::remove_frame(int framesize) {
3841 assert(framesize > 0, "framesize must be > 0");
3842 if (framesize < ((1 << 9) + 2 * wordSize)) {
3843 ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
3844 add(sp, sp, framesize);
3845 } else {
3846 if (framesize < ((1 << 12) + 2 * wordSize))
3847 add(sp, sp, framesize - 2 * wordSize);
3848 else {
3849 mov(rscratch1, framesize - 2 * wordSize);
3850 add(sp, sp, rscratch1);
3851 }
3852 ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
3853 }
3854 }
3855
3856
3857 // Search for str1 in str2 and return index or -1
3858 void MacroAssembler::string_indexof(Register str2, Register str1,
3859 Register cnt2, Register cnt1,
3860 Register tmp1, Register tmp2,
3861 Register tmp3, Register tmp4,
3862 int icnt1, Register result) {
3863 Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;
3864
3865 Register ch1 = rscratch1;
3866 Register ch2 = rscratch2;
3867 Register cnt1tmp = tmp1;
3868 Register cnt2tmp = tmp2;
3869 Register cnt1_neg = cnt1;
3870 Register cnt2_neg = cnt2;
3871 Register result_tmp = tmp4;
3872
3873 // Note, inline_string_indexOf() generates checks:
3874 // if (substr.count > string.count) return -1;
3875 // if (substr.count == 0) return 0;
3876
3877 // We have two strings, a source string in str2, cnt2 and a pattern string
3878 // in str1, cnt1. Find the 1st occurrence of pattern in source or return -1.
3879
3880 // For larger pattern and source we use a simplified Boyer Moore algorithm.
3881 // With a small pattern and source we use linear scan.
3882
3883 if (icnt1 == -1) {
3884 cmp(cnt1, 256); // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
3885 ccmp(cnt1, 8, 0b0000, LO); // Can't handle skip >= 256 because we use
3886 br(LO, LINEARSEARCH); // a byte array.
3887 cmp(cnt1, cnt2, LSR, 2); // Source must be 4 * pattern for BM
3888 br(HS, LINEARSEARCH);
3889 }
3890
3891 // The Boyer Moore algorithm is based on the description here:-
3892 //
3893 // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
3894 //
3895 // This describes an algorithm with 2 shift rules: the 'Bad Character' rule
3896 // and the 'Good Suffix' rule.
3897 //
3898 // These rules are essentially heuristics for how far we can shift the
3899 // pattern along the search string.
3900 //
3901 // The implementation here uses the 'Bad Character' rule only because of the
3902 // complexity of initialisation for the 'Good Suffix' rule.
3903 // 3904 // This is also known as the Boyer-Moore-Horspool algorithm:- 3905 // 3906 // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm 3907 // 3908 // #define ASIZE 128 3909 // 3910 // int bm(unsigned char *x, int m, unsigned char *y, int n) { 3911 // int i, j; 3912 // unsigned c; 3913 // unsigned char bc[ASIZE]; 3914 // 3915 // /* Preprocessing */ 3916 // for (i = 0; i < ASIZE; ++i) 3917 // bc[i] = 0; 3918 // for (i = 0; i < m - 1; ) { 3919 // c = x[i]; 3920 // ++i; 3921 // if (c < ASIZE) bc[c] = i; 3922 // } 3923 // 3924 // /* Searching */ 3925 // j = 0; 3926 // while (j <= n - m) { 3927 // c = y[i+j]; 3928 // if (x[m-1] == c) 3929 // for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i); 3930 // if (i < 0) return j; 3931 // if (c < ASIZE) 3932 // j = j - bc[y[j+m-1]] + m; 3933 // else 3934 // j += 1; // Advance by 1 only if char >= ASIZE 3935 // } 3936 // } 3937 3938 if (icnt1 == -1) { 3939 BIND(BM); 3940 3941 Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP; 3942 Label BMADV, BMMATCH, BMCHECKEND; 3943 3944 Register cnt1end = tmp2; 3945 Register str2end = cnt2; 3946 Register skipch = tmp2; 3947 3948 // Restrict ASIZE to 128 to reduce stack space/initialisation. 3949 // The presence of chars >= ASIZE in the target string does not affect 3950 // performance, but we must be careful not to initialise them in the stack 3951 // array. 3952 // The presence of chars >= ASIZE in the source string may adversely affect 3953 // performance since we can only advance by one when we encounter one. 3954 3955 stp(zr, zr, pre(sp, -128)); 3956 for (int i = 1; i < 8; i++) 3957 stp(zr, zr, Address(sp, i*16)); 3958 3959 mov(cnt1tmp, 0); 3960 sub(cnt1end, cnt1, 1); 3961 BIND(BCLOOP); 3962 ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1))); 3963 cmp(ch1, 128); 3964 add(cnt1tmp, cnt1tmp, 1); 3965 br(HS, BCSKIP); 3966 strb(cnt1tmp, Address(sp, ch1)); 3967 BIND(BCSKIP); 3968 cmp(cnt1tmp, cnt1end); 3969 br(LT, BCLOOP); 3970 3971 mov(result_tmp, str2); 3972 3973 sub(cnt2, cnt2, cnt1); 3974 add(str2end, str2, cnt2, LSL, 1); 3975 BIND(BMLOOPSTR2); 3976 sub(cnt1tmp, cnt1, 1); 3977 ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1))); 3978 ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1))); 3979 cmp(ch1, skipch); 3980 br(NE, BMSKIP); 3981 subs(cnt1tmp, cnt1tmp, 1); 3982 br(LT, BMMATCH); 3983 BIND(BMLOOPSTR1); 3984 ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1))); 3985 ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1))); 3986 cmp(ch1, ch2); 3987 br(NE, BMSKIP); 3988 subs(cnt1tmp, cnt1tmp, 1); 3989 br(GE, BMLOOPSTR1); 3990 BIND(BMMATCH); 3991 sub(result_tmp, str2, result_tmp); 3992 lsr(result, result_tmp, 1); 3993 add(sp, sp, 128); 3994 b(DONE); 3995 BIND(BMADV); 3996 add(str2, str2, 2); 3997 b(BMCHECKEND); 3998 BIND(BMSKIP); 3999 cmp(skipch, 128); 4000 br(HS, BMADV); 4001 ldrb(ch2, Address(sp, skipch)); 4002 add(str2, str2, cnt1, LSL, 1); 4003 sub(str2, str2, ch2, LSL, 1); 4004 BIND(BMCHECKEND); 4005 cmp(str2, str2end); 4006 br(LE, BMLOOPSTR2); 4007 add(sp, sp, 128); 4008 b(NOMATCH); 4009 } 4010 4011 BIND(LINEARSEARCH); 4012 { 4013 Label DO1, DO2, DO3; 4014 4015 Register str2tmp = tmp2; 4016 Register first = tmp3; 4017 4018 if (icnt1 == -1) 4019 { 4020 Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD; 4021 4022 cmp(cnt1, 4); 4023 br(LT, DOSHORT); 4024 4025 sub(cnt2, cnt2, cnt1); 4026 sub(cnt1, cnt1, 4); 4027 mov(result_tmp, cnt2); 4028 4029 lea(str1, Address(str1, cnt1, Address::uxtw(1))); 4030 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4031 sub(cnt1_neg, zr, cnt1, 
LSL, 1); 4032 sub(cnt2_neg, zr, cnt2, LSL, 1); 4033 ldr(first, Address(str1, cnt1_neg)); 4034 4035 BIND(FIRST_LOOP); 4036 ldr(ch2, Address(str2, cnt2_neg)); 4037 cmp(first, ch2); 4038 br(EQ, STR1_LOOP); 4039 BIND(STR2_NEXT); 4040 adds(cnt2_neg, cnt2_neg, 2); 4041 br(LE, FIRST_LOOP); 4042 b(NOMATCH); 4043 4044 BIND(STR1_LOOP); 4045 adds(cnt1tmp, cnt1_neg, 8); 4046 add(cnt2tmp, cnt2_neg, 8); 4047 br(GE, LAST_WORD); 4048 4049 BIND(STR1_NEXT); 4050 ldr(ch1, Address(str1, cnt1tmp)); 4051 ldr(ch2, Address(str2, cnt2tmp)); 4052 cmp(ch1, ch2); 4053 br(NE, STR2_NEXT); 4054 adds(cnt1tmp, cnt1tmp, 8); 4055 add(cnt2tmp, cnt2tmp, 8); 4056 br(LT, STR1_NEXT); 4057 4058 BIND(LAST_WORD); 4059 ldr(ch1, Address(str1)); 4060 sub(str2tmp, str2, cnt1_neg); // adjust to corresponding 4061 ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2 4062 cmp(ch1, ch2); 4063 br(NE, STR2_NEXT); 4064 b(MATCH); 4065 4066 BIND(DOSHORT); 4067 cmp(cnt1, 2); 4068 br(LT, DO1); 4069 br(GT, DO3); 4070 } 4071 4072 if (icnt1 == 4) { 4073 Label CH1_LOOP; 4074 4075 ldr(ch1, str1); 4076 sub(cnt2, cnt2, 4); 4077 mov(result_tmp, cnt2); 4078 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4079 sub(cnt2_neg, zr, cnt2, LSL, 1); 4080 4081 BIND(CH1_LOOP); 4082 ldr(ch2, Address(str2, cnt2_neg)); 4083 cmp(ch1, ch2); 4084 br(EQ, MATCH); 4085 adds(cnt2_neg, cnt2_neg, 2); 4086 br(LE, CH1_LOOP); 4087 b(NOMATCH); 4088 } 4089 4090 if (icnt1 == -1 || icnt1 == 2) { 4091 Label CH1_LOOP; 4092 4093 BIND(DO2); 4094 ldrw(ch1, str1); 4095 sub(cnt2, cnt2, 2); 4096 mov(result_tmp, cnt2); 4097 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4098 sub(cnt2_neg, zr, cnt2, LSL, 1); 4099 4100 BIND(CH1_LOOP); 4101 ldrw(ch2, Address(str2, cnt2_neg)); 4102 cmp(ch1, ch2); 4103 br(EQ, MATCH); 4104 adds(cnt2_neg, cnt2_neg, 2); 4105 br(LE, CH1_LOOP); 4106 b(NOMATCH); 4107 } 4108 4109 if (icnt1 == -1 || icnt1 == 3) { 4110 Label FIRST_LOOP, STR2_NEXT, STR1_LOOP; 4111 4112 BIND(DO3); 4113 ldrw(first, str1); 4114 ldrh(ch1, Address(str1, 4)); 4115 4116 sub(cnt2, cnt2, 3); 4117 mov(result_tmp, cnt2); 4118 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4119 sub(cnt2_neg, zr, cnt2, LSL, 1); 4120 4121 BIND(FIRST_LOOP); 4122 ldrw(ch2, Address(str2, cnt2_neg)); 4123 cmpw(first, ch2); 4124 br(EQ, STR1_LOOP); 4125 BIND(STR2_NEXT); 4126 adds(cnt2_neg, cnt2_neg, 2); 4127 br(LE, FIRST_LOOP); 4128 b(NOMATCH); 4129 4130 BIND(STR1_LOOP); 4131 add(cnt2tmp, cnt2_neg, 4); 4132 ldrh(ch2, Address(str2, cnt2tmp)); 4133 cmp(ch1, ch2); 4134 br(NE, STR2_NEXT); 4135 b(MATCH); 4136 } 4137 4138 if (icnt1 == -1 || icnt1 == 1) { 4139 Label CH1_LOOP, HAS_ZERO; 4140 Label DO1_SHORT, DO1_LOOP; 4141 4142 BIND(DO1); 4143 ldrh(ch1, str1); 4144 cmp(cnt2, 4); 4145 br(LT, DO1_SHORT); 4146 4147 orr(ch1, ch1, ch1, LSL, 16); 4148 orr(ch1, ch1, ch1, LSL, 32); 4149 4150 sub(cnt2, cnt2, 4); 4151 mov(result_tmp, cnt2); 4152 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4153 sub(cnt2_neg, zr, cnt2, LSL, 1); 4154 4155 mov(tmp3, 0x0001000100010001); 4156 BIND(CH1_LOOP); 4157 ldr(ch2, Address(str2, cnt2_neg)); 4158 eor(ch2, ch1, ch2); 4159 sub(tmp1, ch2, tmp3); 4160 orr(tmp2, ch2, 0x7fff7fff7fff7fff); 4161 bics(tmp1, tmp1, tmp2); 4162 br(NE, HAS_ZERO); 4163 adds(cnt2_neg, cnt2_neg, 8); 4164 br(LT, CH1_LOOP); 4165 4166 cmp(cnt2_neg, 8); 4167 mov(cnt2_neg, 0); 4168 br(LT, CH1_LOOP); 4169 b(NOMATCH); 4170 4171 BIND(HAS_ZERO); 4172 rev(tmp1, tmp1); 4173 clz(tmp1, tmp1); 4174 add(cnt2_neg, cnt2_neg, tmp1, LSR, 3); 4175 b(MATCH); 4176 4177 BIND(DO1_SHORT); 4178 mov(result_tmp, cnt2); 4179 lea(str2, Address(str2, cnt2, 
Address::uxtw(1))); 4180 sub(cnt2_neg, zr, cnt2, LSL, 1); 4181 BIND(DO1_LOOP); 4182 ldrh(ch2, Address(str2, cnt2_neg)); 4183 cmpw(ch1, ch2); 4184 br(EQ, MATCH); 4185 adds(cnt2_neg, cnt2_neg, 2); 4186 br(LT, DO1_LOOP); 4187 } 4188 } 4189 BIND(NOMATCH); 4190 mov(result, -1); 4191 b(DONE); 4192 BIND(MATCH); 4193 add(result, result_tmp, cnt2_neg, ASR, 1); 4194 BIND(DONE); 4195 } 4196 4197 // Compare strings. 4198 void MacroAssembler::string_compare(Register str1, Register str2, 4199 Register cnt1, Register cnt2, Register result, 4200 Register tmp1) { 4201 Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING, 4202 NEXT_WORD, DIFFERENCE; 4203 4204 BLOCK_COMMENT("string_compare {"); 4205 4206 // Compute the minimum of the string lengths and save the difference. 4207 subsw(tmp1, cnt1, cnt2); 4208 cselw(cnt2, cnt1, cnt2, Assembler::LE); // min 4209 4210 // A very short string 4211 cmpw(cnt2, 4); 4212 br(Assembler::LT, SHORT_STRING); 4213 4214 // Check if the strings start at the same location. 4215 cmp(str1, str2); 4216 br(Assembler::EQ, LENGTH_DIFF); 4217 4218 // Compare longwords 4219 { 4220 subw(cnt2, cnt2, 4); // The last longword is a special case 4221 4222 // Move both string pointers to the last longword of their 4223 // strings, negate the remaining count, and convert it to bytes. 4224 lea(str1, Address(str1, cnt2, Address::uxtw(1))); 4225 lea(str2, Address(str2, cnt2, Address::uxtw(1))); 4226 sub(cnt2, zr, cnt2, LSL, 1); 4227 4228 // Loop, loading longwords and comparing them into rscratch2. 4229 bind(NEXT_WORD); 4230 ldr(result, Address(str1, cnt2)); 4231 ldr(cnt1, Address(str2, cnt2)); 4232 adds(cnt2, cnt2, wordSize); 4233 eor(rscratch2, result, cnt1); 4234 cbnz(rscratch2, DIFFERENCE); 4235 br(Assembler::LT, NEXT_WORD); 4236 4237 // Last longword. In the case where length == 4 we compare the 4238 // same longword twice, but that's still faster than another 4239 // conditional branch. 4240 4241 ldr(result, Address(str1)); 4242 ldr(cnt1, Address(str2)); 4243 eor(rscratch2, result, cnt1); 4244 cbz(rscratch2, LENGTH_DIFF); 4245 4246 // Find the first different characters in the longwords and 4247 // compute their difference. 4248 bind(DIFFERENCE); 4249 rev(rscratch2, rscratch2); 4250 clz(rscratch2, rscratch2); 4251 andr(rscratch2, rscratch2, -16); 4252 lsrv(result, result, rscratch2); 4253 uxthw(result, result); 4254 lsrv(cnt1, cnt1, rscratch2); 4255 uxthw(cnt1, cnt1); 4256 subw(result, result, cnt1); 4257 b(DONE); 4258 } 4259 4260 bind(SHORT_STRING); 4261 // Is the minimum length zero? 4262 cbz(cnt2, LENGTH_DIFF); 4263 4264 bind(SHORT_LOOP); 4265 load_unsigned_short(result, Address(post(str1, 2))); 4266 load_unsigned_short(cnt1, Address(post(str2, 2))); 4267 subw(result, result, cnt1); 4268 cbnz(result, DONE); 4269 sub(cnt2, cnt2, 1); 4270 cbnz(cnt2, SHORT_LOOP); 4271 4272 // Strings are equal up to min length. Return the length difference. 4273 bind(LENGTH_DIFF); 4274 mov(result, tmp1); 4275 4276 // That's it 4277 bind(DONE); 4278 4279 BLOCK_COMMENT("} string_compare"); 4280 } 4281 4282 4283 void MacroAssembler::string_equals(Register str1, Register str2, 4284 Register cnt, Register result, 4285 Register tmp1) { 4286 Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING, 4287 NEXT_WORD; 4288 4289 const Register tmp2 = rscratch1; 4290 assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2); 4291 4292 BLOCK_COMMENT("string_equals {"); 4293 4294 // Start by assuming that the strings are not equal. 
4295 mov(result, zr); 4296 4297 // A very short string 4298 cmpw(cnt, 4); 4299 br(Assembler::LT, SHORT_STRING); 4300 4301 // Check if the strings start at the same location. 4302 cmp(str1, str2); 4303 br(Assembler::EQ, SAME_CHARS); 4304 4305 // Compare longwords 4306 { 4307 subw(cnt, cnt, 4); // The last longword is a special case 4308 4309 // Move both string pointers to the last longword of their 4310 // strings, negate the remaining count, and convert it to bytes. 4311 lea(str1, Address(str1, cnt, Address::uxtw(1))); 4312 lea(str2, Address(str2, cnt, Address::uxtw(1))); 4313 sub(cnt, zr, cnt, LSL, 1); 4314 4315 // Loop, loading longwords and comparing them into rscratch2. 4316 bind(NEXT_WORD); 4317 ldr(tmp1, Address(str1, cnt)); 4318 ldr(tmp2, Address(str2, cnt)); 4319 adds(cnt, cnt, wordSize); 4320 eor(rscratch2, tmp1, tmp2); 4321 cbnz(rscratch2, DONE); 4322 br(Assembler::LT, NEXT_WORD); 4323 4324 // Last longword. In the case where length == 4 we compare the 4325 // same longword twice, but that's still faster than another 4326 // conditional branch. 4327 4328 ldr(tmp1, Address(str1)); 4329 ldr(tmp2, Address(str2)); 4330 eor(rscratch2, tmp1, tmp2); 4331 cbz(rscratch2, SAME_CHARS); 4332 b(DONE); 4333 } 4334 4335 bind(SHORT_STRING); 4336 // Is the length zero? 4337 cbz(cnt, SAME_CHARS); 4338 4339 bind(SHORT_LOOP); 4340 load_unsigned_short(tmp1, Address(post(str1, 2))); 4341 load_unsigned_short(tmp2, Address(post(str2, 2))); 4342 subw(tmp1, tmp1, tmp2); 4343 cbnz(tmp1, DONE); 4344 sub(cnt, cnt, 1); 4345 cbnz(cnt, SHORT_LOOP); 4346 4347 // Strings are equal. 4348 bind(SAME_CHARS); 4349 mov(result, true); 4350 4351 // That's it 4352 bind(DONE); 4353 4354 BLOCK_COMMENT("} string_equals"); 4355 } 4356 4357 // Compare char[] arrays aligned to 4 bytes 4358 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2, 4359 Register result, Register tmp1) 4360 { 4361 Register cnt1 = rscratch1; 4362 Register cnt2 = rscratch2; 4363 Register tmp2 = rscratch2; 4364 4365 Label SAME, DIFFER, NEXT, TAIL03, TAIL01; 4366 4367 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4368 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); 4369 4370 BLOCK_COMMENT("char_arrays_equals {"); 4371 4372 // different until proven equal 4373 mov(result, false); 4374 4375 // same array? 4376 cmp(ary1, ary2); 4377 br(Assembler::EQ, SAME); 4378 4379 // ne if either null 4380 cbz(ary1, DIFFER); 4381 cbz(ary2, DIFFER); 4382 4383 // lengths ne? 
4384 ldrw(cnt1, Address(ary1, length_offset)); 4385 ldrw(cnt2, Address(ary2, length_offset)); 4386 cmp(cnt1, cnt2); 4387 br(Assembler::NE, DIFFER); 4388 4389 lea(ary1, Address(ary1, base_offset)); 4390 lea(ary2, Address(ary2, base_offset)); 4391 4392 subs(cnt1, cnt1, 4); 4393 br(LT, TAIL03); 4394 4395 BIND(NEXT); 4396 ldr(tmp1, Address(post(ary1, 8))); 4397 ldr(tmp2, Address(post(ary2, 8))); 4398 subs(cnt1, cnt1, 4); 4399 eor(tmp1, tmp1, tmp2); 4400 cbnz(tmp1, DIFFER); 4401 br(GE, NEXT); 4402 4403 BIND(TAIL03); // 0-3 chars left, cnt1 = #chars left - 4 4404 tst(cnt1, 0b10); 4405 br(EQ, TAIL01); 4406 ldrw(tmp1, Address(post(ary1, 4))); 4407 ldrw(tmp2, Address(post(ary2, 4))); 4408 cmp(tmp1, tmp2); 4409 br(NE, DIFFER); 4410 BIND(TAIL01); // 0-1 chars left 4411 tst(cnt1, 0b01); 4412 br(EQ, SAME); 4413 ldrh(tmp1, ary1); 4414 ldrh(tmp2, ary2); 4415 cmp(tmp1, tmp2); 4416 br(NE, DIFFER); 4417 4418 BIND(SAME); 4419 mov(result, true); 4420 BIND(DIFFER); // result already set 4421 4422 BLOCK_COMMENT("} char_arrays_equals"); 4423 } 4424 4425 // encode char[] to byte[] in ISO_8859_1 4426 void MacroAssembler::encode_iso_array(Register src, Register dst, 4427 Register len, Register result, 4428 FloatRegister Vtmp1, FloatRegister Vtmp2, 4429 FloatRegister Vtmp3, FloatRegister Vtmp4) 4430 { 4431 Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1; 4432 Register tmp1 = rscratch1; 4433 4434 mov(result, len); // Save initial len 4435 4436 #ifndef BUILTIN_SIM 4437 subs(len, len, 32); 4438 br(LT, LOOP_8); 4439 4440 // The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions 4441 // to convert chars to bytes. These set the 'QC' bit in the FPSR if 4442 // any char could not fit in a byte, so clear the FPSR so we can test it. 4443 clear_fpsr(); 4444 4445 BIND(NEXT_32); 4446 ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src); 4447 uqxtn(Vtmp1, T8B, Vtmp1, T8H); // uqxtn - write bottom half 4448 uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half 4449 uqxtn(Vtmp2, T8B, Vtmp3, T8H); 4450 uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2 4451 get_fpsr(tmp1); 4452 cbnzw(tmp1, LOOP_8); 4453 st1(Vtmp1, Vtmp2, T16B, post(dst, 32)); 4454 subs(len, len, 32); 4455 add(src, src, 64); 4456 br(GE, NEXT_32); 4457 4458 BIND(LOOP_8); 4459 adds(len, len, 32-8); 4460 br(LT, LOOP_1); 4461 clear_fpsr(); // QC may be set from loop above, clear again 4462 BIND(NEXT_8); 4463 ld1(Vtmp1, T8H, src); 4464 uqxtn(Vtmp1, T8B, Vtmp1, T8H); 4465 get_fpsr(tmp1); 4466 cbnzw(tmp1, LOOP_1); 4467 st1(Vtmp1, T8B, post(dst, 8)); 4468 subs(len, len, 8); 4469 add(src, src, 16); 4470 br(GE, NEXT_8); 4471 4472 BIND(LOOP_1); 4473 adds(len, len, 8); 4474 br(LE, DONE); 4475 #else 4476 cbz(len, DONE); 4477 #endif 4478 BIND(NEXT_1); 4479 ldrh(tmp1, Address(post(src, 2))); 4480 tst(tmp1, 0xff00); 4481 br(NE, DONE); 4482 strb(tmp1, Address(post(dst, 1))); 4483 subs(len, len, 1); 4484 br(GT, NEXT_1); 4485 4486 BIND(DONE); 4487 sub(result, result, len); // Return index where we stopped 4488 }
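// For reference, the code generated above is functionally equivalent to this
// scalar loop (sketch only; the SIMD paths handle 32 and 8 chars at a time and
// use the FPSR QC flag to detect a char that saturates when narrowed to a byte):
//
//   int encoded;
//   for (encoded = 0; encoded < len; encoded++) {
//     jchar c = src[encoded];
//     if (c > 0xff) break;          // not representable in ISO-8859-1: stop
//     dst[encoded] = (jbyte)c;
//   }
//   return encoded;                 // == 'result', the number of chars stored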