/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
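    // ADR/ADRP encode a signed 21-bit immediate split across the
    // instruction: the low two bits sit in bits 30:29 (immlo) and the
    // high nineteen in bits 23:5 (immhi); bit 31 selects ADRP (4k page
    // granule) over ADR (byte granule). The patches below write the two
    // fields separately.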
    offset = target - branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 3 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 2 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str or add instruction. Otherwise we could accidentally end
      // up treating a type 3 relocation as a type 1 or 2 just because it happened
      // to be followed by a random unrelated ldr/str or add instruction.
      //
      // In the case of a type 3 relocation, we know that these are only generated
      // for the safepoint polling page, or for the card type byte map base, so we
      // assert as much and of course that the offset is 0.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else {
        assert((jbyte *)target ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               target == StubRoutines::crc_table_addr() ||
               (address)target == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
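  // Two sequences are patched here: a narrow (32-bit) oop is emitted as
  // movz+movk (16 bits per instruction, high halfword first), a wide
  // (48-bit) oop as movz+movk+movk, mirroring the two branches below.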
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only. This is only generated for
      // the safepoint polling page or for the card table byte map base, so
      // we assert as much.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        assert((jbyte *)target_page ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               (address)target_page == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk.  See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}


void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  if (clear_pc) {
    str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
  }
}

// Calls to C land
//
// When entering C land, the rfp & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
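//
// A typical sequence (cf. call_VM_base below) is:
//   set_last_Java_frame(last_java_sp, rfp, return_label, rscratch1);
//   ... call into the VM ...
//   reset_last_Java_frame(true, true);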
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct.  We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
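    // The emitted far-branch sequence is then
    //   adrp tmp, <target page> ; add tmp, tmp, #<page offset> ; br tmp
    // which can reach anywhere within +/-4GB of the branch site.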
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    tmp_reg = rscratch2;
  }
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread.
  // Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
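  // (As used throughout this function, cmpxchgptr(expected, new_val,
  //  addr_reg, tmp, succeed, fail) roughly compares the word at
  //  addr_reg with expected and, on a match, installs new_val,
  //  branching to succeed on success and to fail otherwise.)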
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline.  If the code cache is small
// trampolines won't be emitted.

void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
  if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
    emit_trampoline_stub(offset(), entry.target());
    if (Compile::current()->failing()) { return; } // CodeCache is full
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (Assembler::reachable_from_branch_at(pc(), entry.target())) {
    bl(entry.target());
  } else {
    bl(pc());
  }
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

void MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                          address dest) {
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    return;
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
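  // The stub emitted below is laid out as
  //   ldr  rscratch1, <pc + 8>   // load the 64-bit destination word
  //   br   rscratch1
  //   .quad <dest>               // patchable destination address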
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
}

void MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
"smashed arg"); 826 pass_arg2(this, arg_2); 827 pass_arg1(this, arg_1); 828 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 829 } 830 831 832 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 833 ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 834 str(zr, Address(java_thread, JavaThread::vm_result_offset())); 835 verify_oop(oop_result, "broken oop in call_VM_base"); 836 } 837 838 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 839 ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 840 str(zr, Address(java_thread, JavaThread::vm_result_2_offset())); 841 } 842 843 void MacroAssembler::align(int modulus) { 844 while (offset() % modulus != 0) nop(); 845 } 846 847 // these are no-ops overridden by InterpreterMacroAssembler 848 849 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { } 850 851 void MacroAssembler::check_and_handle_popframe(Register java_thread) { } 852 853 854 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 855 Register tmp, 856 int offset) { 857 intptr_t value = *delayed_value_addr; 858 if (value != 0) 859 return RegisterOrConstant(value + offset); 860 861 // load indirectly to solve generation ordering problem 862 ldr(tmp, ExternalAddress((address) delayed_value_addr)); 863 864 if (offset != 0) 865 add(tmp, tmp, offset); 866 867 return RegisterOrConstant(tmp); 868 } 869 870 871 void MacroAssembler:: notify(int type) { 872 if (type == bytecode_start) { 873 // set_last_Java_frame(esp, rfp, (address)NULL); 874 Assembler:: notify(type); 875 // reset_last_Java_frame(true, false); 876 } 877 else 878 Assembler:: notify(type); 879 } 880 881 // Look up the method for a megamorphic invokeinterface call. 882 // The target method is determined by <intf_klass, itable_index>. 883 // The receiver klass is in recv_klass. 884 // On success, the result will be in method_result, and execution falls through. 885 // On failure, execution transfers to the given label. 886 void MacroAssembler::lookup_interface_method(Register recv_klass, 887 Register intf_klass, 888 RegisterOrConstant itable_index, 889 Register method_result, 890 Register scan_temp, 891 Label& L_no_such_interface) { 892 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 893 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 894 "caller must use same register for non-constant itable index as for method"); 895 896 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 897 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 898 int itentry_off = itableMethodEntry::method_offset_in_bytes(); 899 int scan_step = itableOffsetEntry::size() * wordSize; 900 int vte_size = vtableEntry::size() * wordSize; 901 assert(vte_size == wordSize, "else adjust times_vte_scale"); 902 903 ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize)); 904 905 // %%% Could store the aligned, prescaled offset in the klassoop. 906 // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 907 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3))); 908 add(scan_temp, scan_temp, vtable_base); 909 if (HeapWordsPerLong > 1) { 910 // Round up to align_object_offset boundary 911 // see code for instanceKlass::start_of_itable! 
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
  if (itentry_off)
    add(recv_klass, recv_klass, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  ldr(method_result, Address(recv_klass, scan_temp));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result, Address(recv_klass, vtable_offset_in_bytes));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  // Get super_klass value into r0 (even if it was in r5 or r2).
  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop_addr");
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr);
}

void MacroAssembler::call_VM_leaf_base1(address entry_point,
                                        int number_of_gp_arguments,
                                        int number_of_fp_arguments,
                                        ret_type type,
                                        Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  // We add 1 to number_of_arguments because the thread in arg0 is
  // not counted
  mov(rscratch1, entry_point);
  blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
  maybe_isb();
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert(arg_0 != c_rarg3, "smashed arg");
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    // NOTE: this is plenty to provoke a segv
    ldr(zr, Address(reg));
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// MacroAssembler protected routines needed to implement
// public methods

void MacroAssembler::mov(Register r, Address dest) {
  code_section()->relocate(pc(), dest.rspec());
  u_int64_t imm64 = (u_int64_t)dest.target();
  movptr(r, imm64);
}

// Move a constant pointer into r.  In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
}

// Macro to mov replicated immediate to vector register.
//  Vd will get the following values for different arrangements in T
//   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
//   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
//   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
//   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
//   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
//   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
//   T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
  assert(T != T1D && T != T2D, "invalid arrangement");
  if (T == T8B || T == T16B) {
    assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
    movi(Vd, T, imm32 & 0xff, 0);
    return;
  }
  u_int32_t nimm32 = ~imm32;
  if (T == T4H || T == T8H) {
    assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
    imm32 &= 0xffff;
    nimm32 &= 0xffff;
  }
  u_int32_t x = imm32;
  int movi_cnt = 0;
  int movn_cnt = 0;
  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
  x = nimm32;
  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
  if (movn_cnt < movi_cnt) imm32 = nimm32;
  unsigned lsl = 0;
  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
  if (movn_cnt < movi_cnt)
    mvni(Vd, T, imm32 & 0xff, lsl);
  else
    movi(Vd, T, imm32 & 0xff, lsl);
  imm32 >>= 8; lsl += 8;
  while (imm32) {
    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
    if (movn_cnt < movi_cnt)
      bici(Vd, T, imm32 & 0xff, lsl);
    else
      orri(Vd, T, imm32 & 0xff, lsl);
    lsl += 8; imm32 >>= 8;
  }
}

void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
    block_comment(buffer);
  }
#endif
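  // The strategy below: if the value is a valid bitmask ("logical")
  // immediate, a single ORR materializes it; otherwise count the 16-bit
  // halfwords that are 0x0000 (MOVZ-friendly) and 0xffff (MOVN-friendly)
  // and start from whichever base instruction leaves fewer MOVKs to
  // patch in the remaining halfwords.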
  if (operand_valid_for_logical_immediate(false, imm64)) {
    orr(dst, zr, imm64);
  } else {
    // we can use a combination of MOVZ or MOVN with
    // MOVK to build up the constant
    u_int64_t imm_h[4];
    int zero_count = 0;
    int neg_count = 0;
    int i;
    for (i = 0; i < 4; i++) {
      imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
      if (imm_h[i] == 0) {
        zero_count++;
      } else if (imm_h[i] == 0xffffL) {
        neg_count++;
      }
    }
    if (zero_count == 4) {
      // one MOVZ will do
      movz(dst, 0);
    } else if (neg_count == 4) {
      // one MOVN will do
      movn(dst, 0);
    } else if (zero_count == 3) {
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          break;
        }
      }
    } else if (neg_count == 3) {
      // one MOVN will do
      for (int i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          break;
        }
      }
    } else if (zero_count == 2) {
      // one MOVZ and one MOVK will do
      for (i = 0; i < 3; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 2) {
      // one MOVN and one MOVK will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (zero_count == 1) {
      // one MOVZ and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (; i < 4; i++) {
        if (imm_h[i] != 0x0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 1) {
      // one MOVN and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else {
      // use a MOVZ and 3 MOVKs (makes it easier to debug)
      movz(dst, (u_int32_t)imm_h[0], 0);
      for (i = 1; i < 4; i++) {
        movk(dst, (u_int32_t)imm_h[i], (i << 4));
      }
    }
  }
}

void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(true, imm32)) {
    orrw(dst, zr, imm32);
  } else {
    // we can use MOVZ, MOVN or two calls to MOVK to build up the
    // constant
    u_int32_t imm_h[2];
    imm_h[0] = imm32 & 0xffff;
    imm_h[1] = ((imm32 >> 16) & 0xffff);
    if (imm_h[0] == 0) {
      movzw(dst, imm_h[1], 16);
    } else if (imm_h[0] == 0xffff) {
      movnw(dst, imm_h[1] ^ 0xffff, 16);
    } else if (imm_h[1] == 0) {
      movzw(dst, imm_h[0], 0);
    } else if (imm_h[1] == 0xffff) {
      movnw(dst, imm_h[0] ^ 0xffff, 0);
    } else {
      // use a MOVZ and MOVK (makes it easier to debug)
      movzw(dst, imm_h[0], 0);
      movkw(dst, imm_h[1], 16);
    }
  }
}
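// Illustrative 32-bit cases, following the selection logic above:
//   0x0000ffff -> orrw  dst, zr, #0xffff        (valid logical immediate)
//   0x12340000 -> movzw dst, #0x1234, lsl #16   (low halfword is zero)
//   0x12345678 -> movzw dst, #0x5678
//               + movkw dst, #0x1234, lsl #16   (general case)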
offset in Rd. Rd may or may 1603 // not actually be used: you must use the Address that is returned. 1604 // It is up to you to ensure that the shift provided matches the size 1605 // of your data. 1606 Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) { 1607 if (Address::offset_ok_for_immed(byte_offset, shift)) 1608 // It fits; no need for any heroics 1609 return Address(base, byte_offset); 1610 1611 // Don't do anything clever with negative or misaligned offsets 1612 unsigned mask = (1 << shift) - 1; 1613 if (byte_offset < 0 || byte_offset & mask) { 1614 mov(Rd, byte_offset); 1615 add(Rd, base, Rd); 1616 return Address(Rd); 1617 } 1618 1619 // See if we can do this with two 12-bit offsets 1620 { 1621 unsigned long word_offset = byte_offset >> shift; 1622 unsigned long masked_offset = word_offset & 0xfff000; 1623 if (Address::offset_ok_for_immed(word_offset - masked_offset) 1624 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 1625 add(Rd, base, masked_offset << shift); 1626 word_offset -= masked_offset; 1627 return Address(Rd, word_offset << shift); 1628 } 1629 } 1630 1631 // Do it the hard way 1632 mov(Rd, byte_offset); 1633 add(Rd, base, Rd); 1634 return Address(Rd); 1635 } 1636 1637 void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) { 1638 Label retry_load; 1639 bind(retry_load); 1640 // flush and load exclusive from the memory location 1641 ldxrw(tmp, counter_addr); 1642 addw(tmp, tmp, 1); 1643 // if we store+flush with no intervening write tmp will be zero 1644 stxrw(tmp, tmp, counter_addr); 1645 cbnzw(tmp, retry_load); 1646 } 1647 1648 1649 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 1650 bool want_remainder, Register scratch) 1651 { 1652 // Full implementation of Java idiv and irem. The function 1653 // returns the (pc) offset of the div instruction - may be needed 1654 // for implicit exceptions. 1655 // 1656 // constraint : ra/rb =/= scratch 1657 // normal case 1658 // 1659 // input : ra: dividend 1660 // rb: divisor 1661 // 1662 // result: either 1663 // quotient (= ra idiv rb) 1664 // remainder (= ra irem rb) 1665 1666 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1667 1668 int idivl_offset = offset(); 1669 if (! want_remainder) { 1670 sdivw(result, ra, rb); 1671 } else { 1672 sdivw(scratch, ra, rb); 1673 Assembler::msubw(result, scratch, rb, ra); 1674 } 1675 1676 return idivl_offset; 1677 } 1678 1679 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb, 1680 bool want_remainder, Register scratch) 1681 { 1682 // Full implementation of Java ldiv and lrem. The function 1683 // returns the (pc) offset of the div instruction - may be needed 1684 // for implicit exceptions. 1685 // 1686 // constraint : ra/rb =/= scratch 1687 // normal case 1688 // 1689 // input : ra: dividend 1690 // rb: divisor 1691 // 1692 // result: either 1693 // quotient (= ra idiv rb) 1694 // remainder (= ra irem rb) 1695 1696 assert(ra != scratch && rb != scratch, "reg cannot be scratch"); 1697 1698 int idivq_offset = offset(); 1699 if (! 
want_remainder) { 1700 sdiv(result, ra, rb); 1701 } else { 1702 sdiv(scratch, ra, rb); 1703 Assembler::msub(result, scratch, rb, ra); 1704 } 1705 1706 return idivq_offset; 1707 } 1708 1709 // MacroAssembler routines actually found to be needed 1710 1711 void MacroAssembler::push(Register src) 1712 { 1713 str(src, Address(pre(esp, -1 * wordSize))); 1714 } 1715 1716 void MacroAssembler::pop(Register dst) 1717 { 1718 ldr(dst, Address(post(esp, 1 * wordSize))); 1719 } 1720 1721 // Note: load_unsigned_short used to be called load_unsigned_word. 1722 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1723 int off = offset(); 1724 ldrh(dst, src); 1725 return off; 1726 } 1727 1728 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1729 int off = offset(); 1730 ldrb(dst, src); 1731 return off; 1732 } 1733 1734 int MacroAssembler::load_signed_short(Register dst, Address src) { 1735 int off = offset(); 1736 ldrsh(dst, src); 1737 return off; 1738 } 1739 1740 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1741 int off = offset(); 1742 ldrsb(dst, src); 1743 return off; 1744 } 1745 1746 int MacroAssembler::load_signed_short32(Register dst, Address src) { 1747 int off = offset(); 1748 ldrshw(dst, src); 1749 return off; 1750 } 1751 1752 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 1753 int off = offset(); 1754 ldrsbw(dst, src); 1755 return off; 1756 } 1757 1758 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1759 switch (size_in_bytes) { 1760 case 8: ldr(dst, src); break; 1761 case 4: ldrw(dst, src); break; 1762 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1763 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1764 default: ShouldNotReachHere(); 1765 } 1766 } 1767 1768 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1769 switch (size_in_bytes) { 1770 case 8: str(src, dst); break; 1771 case 4: strw(src, dst); break; 1772 case 2: strh(src, dst); break; 1773 case 1: strb(src, dst); break; 1774 default: ShouldNotReachHere(); 1775 } 1776 } 1777 1778 void MacroAssembler::decrementw(Register reg, int value) 1779 { 1780 if (value < 0) { incrementw(reg, -value); return; } 1781 if (value == 0) { return; } 1782 if (value < (1 << 12)) { subw(reg, reg, value); return; } 1783 /* else */ { 1784 guarantee(reg != rscratch2, "invalid dst for register decrement"); 1785 movw(rscratch2, (unsigned)value); 1786 subw(reg, reg, rscratch2); 1787 } 1788 } 1789 1790 void MacroAssembler::decrement(Register reg, int value) 1791 { 1792 if (value < 0) { increment(reg, -value); return; } 1793 if (value == 0) { return; } 1794 if (value < (1 << 12)) { sub(reg, reg, value); return; } 1795 /* else */ { 1796 assert(reg != rscratch2, "invalid dst for register decrement"); 1797 mov(rscratch2, (unsigned long)value); 1798 sub(reg, reg, rscratch2); 1799 } 1800 } 1801 1802 void MacroAssembler::decrementw(Address dst, int value) 1803 { 1804 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 1805 ldrw(rscratch1, dst); 1806 decrementw(rscratch1, value); 1807 strw(rscratch1, dst); 1808 } 1809 1810 void MacroAssembler::decrement(Address dst, int value) 1811 { 1812 assert(!dst.uses(rscratch1), "invalid address for decrement"); 1813 ldr(rscratch1, dst); 1814 decrement(rscratch1, value); 1815 str(rscratch1, dst); 1816 } 1817 1818 void MacroAssembler::incrementw(Register reg, int value) 1819 { 1820 if (value < 0) { decrementw(reg, -value); return; } 1821 if (value == 0) { return; } 1822 if (value < (1 << 12)) { addw(reg, reg, value); return; } 1823 /* else */ { 1824 assert(reg != rscratch2, "invalid dst for register increment"); 1825 movw(rscratch2, (unsigned)value); 1826 addw(reg, reg, rscratch2); 1827 } 1828 } 1829 1830 void MacroAssembler::increment(Register reg, int value) 1831 { 1832 if (value < 0) { decrement(reg, -value); return; } 1833 if (value == 0) { return; } 1834 if (value < (1 << 12)) { add(reg, reg, value); return; } 1835 /* else */ { 1836 assert(reg != rscratch2, "invalid dst for register increment"); 1837 movw(rscratch2, (unsigned)value); 1838 add(reg, reg, rscratch2); 1839 } 1840 } 1841 1842 void MacroAssembler::incrementw(Address dst, int value) 1843 { 1844 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1845 ldrw(rscratch1, dst); 1846 incrementw(rscratch1, value); 1847 strw(rscratch1, dst); 1848 } 1849 1850 void MacroAssembler::increment(Address dst, int value) 1851 { 1852 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1853 ldr(rscratch1, dst); 1854 increment(rscratch1, value); 1855 str(rscratch1, dst); 1856 } 1857 1858 1859 void MacroAssembler::pusha() { 1860 push(0x7fffffff, sp); 1861 } 1862 1863 void MacroAssembler::popa() { 1864 pop(0x7fffffff, sp); 1865 } 1866 1867 // Push lots of registers in the bit set supplied. Don't push sp. 
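// (Illustrative example: a bitset of 0b110, i.e. r1 and r2, is stored as a
// single stp of the pair {r1, r2} at a pre-decremented stack address; an odd
// register count is padded to a full pair with zr.)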
1868 // Return the number of words pushed 1869 int MacroAssembler::push(unsigned int bitset, Register stack) { 1870 int words_pushed = 0; 1871 1872 // Scan bitset to accumulate register pairs 1873 unsigned char regs[32]; 1874 int count = 0; 1875 for (int reg = 0; reg <= 30; reg++) { 1876 if (1 & bitset) 1877 regs[count++] = reg; 1878 bitset >>= 1; 1879 } 1880 regs[count++] = zr->encoding_nocheck(); 1881 count &= ~1; // Only push an even number of regs 1882 1883 if (count) { 1884 stp(as_Register(regs[0]), as_Register(regs[1]), 1885 Address(pre(stack, -count * wordSize))); 1886 words_pushed += 2; 1887 } 1888 for (int i = 2; i < count; i += 2) { 1889 stp(as_Register(regs[i]), as_Register(regs[i+1]), 1890 Address(stack, i * wordSize)); 1891 words_pushed += 2; 1892 } 1893 1894 assert(words_pushed == count, "oops, pushed != count"); 1895 1896 return count; 1897 } 1898 1899 int MacroAssembler::pop(unsigned int bitset, Register stack) { 1900 int words_pushed = 0; 1901 1902 // Scan bitset to accumulate register pairs 1903 unsigned char regs[32]; 1904 int count = 0; 1905 for (int reg = 0; reg <= 30; reg++) { 1906 if (1 & bitset) 1907 regs[count++] = reg; 1908 bitset >>= 1; 1909 } 1910 regs[count++] = zr->encoding_nocheck(); 1911 count &= ~1; 1912 1913 for (int i = 2; i < count; i += 2) { 1914 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 1915 Address(stack, i * wordSize)); 1916 words_pushed += 2; 1917 } 1918 if (count) { 1919 ldp(as_Register(regs[0]), as_Register(regs[1]), 1920 Address(post(stack, count * wordSize))); 1921 words_pushed += 2; 1922 } 1923 1924 assert(words_pushed == count, "oops, pushed != count"); 1925 1926 return count; 1927 } 1928 #ifdef ASSERT 1929 void MacroAssembler::verify_heapbase(const char* msg) { 1930 #if 0 1931 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 1932 assert (Universe::heap() != NULL, "java heap should be initialized"); 1933 if (CheckCompressedOops) { 1934 Label ok; 1935 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 1936 cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 1937 br(Assembler::EQ, ok); 1938 stop(msg); 1939 bind(ok); 1940 pop(1 << rscratch1->encoding(), sp); 1941 } 1942 #endif 1943 } 1944 #endif 1945 1946 void MacroAssembler::stop(const char* msg) { 1947 address ip = pc(); 1948 pusha(); 1949 mov(c_rarg0, (address)msg); 1950 mov(c_rarg1, (address)ip); 1951 mov(c_rarg2, sp); 1952 mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64)); 1953 // call(c_rarg3); 1954 blrt(c_rarg3, 3, 0, 1); 1955 hlt(0); 1956 } 1957 1958 // If a constant does not fit in an immediate field, generate some 1959 // number of MOV instructions and then perform the operation. 1960 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm, 1961 add_sub_imm_insn insn1, 1962 add_sub_reg_insn insn2) { 1963 assert(Rd != zr, "Rd = zr and not setting flags?"); 1964 if (operand_valid_for_add_sub_immediate((int)imm)) { 1965 (this->*insn1)(Rd, Rn, imm); 1966 } else { 1967 if (uabs(imm) < (1 << 24)) { 1968 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 1969 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 1970 } else { 1971 assert_different_registers(Rd, Rn); 1972 mov(Rd, (uint64_t)imm); 1973 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 1974 } 1975 } 1976 } 1977 1978 // Separate version which sets the flags. Optimisations are more restricted 1979 // because we must set the flags correctly. 
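// (A sketch of why: splitting, say, adds(Rd, Rn, 0x123456) into two immediate
// adds would leave the flags describing only the second, partial addition, so
// here a large constant is instead materialized into Rd and the
// register-register form of the instruction is used.)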
1980 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm, 1981 add_sub_imm_insn insn1, 1982 add_sub_reg_insn insn2) { 1983 if (operand_valid_for_add_sub_immediate((int)imm)) { 1984 (this->*insn1)(Rd, Rn, imm); 1985 } else { 1986 assert_different_registers(Rd, Rn); 1987 assert(Rd != zr, "overflow in immediate operand"); 1988 mov(Rd, (uint64_t)imm); 1989 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 1990 } 1991 } 1992 1993 1994 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 1995 if (increment.is_register()) { 1996 add(Rd, Rn, increment.as_register()); 1997 } else { 1998 add(Rd, Rn, increment.as_constant()); 1999 } 2000 } 2001 2002 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 2003 if (increment.is_register()) { 2004 addw(Rd, Rn, increment.as_register()); 2005 } else { 2006 addw(Rd, Rn, increment.as_constant()); 2007 } 2008 } 2009 2010 void MacroAssembler::reinit_heapbase() 2011 { 2012 if (UseCompressedOops) { 2013 if (Universe::is_fully_initialized()) { 2014 mov(rheapbase, Universe::narrow_ptrs_base()); 2015 } else { 2016 lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 2017 ldr(rheapbase, Address(rheapbase)); 2018 } 2019 } 2020 } 2021 2022 // this simulates the behaviour of the x86 cmpxchg instruction using a 2023 // load linked/store conditional pair. we use the acquire/release 2024 // versions of these instructions so that we flush pending writes as 2025 // per Java semantics. 2026 2027 // n.b the x86 version assumes the old value to be compared against is 2028 // in rax and updates rax with the value located in memory if the 2029 // cmpxchg fails. we supply a register for the old value explicitly 2030 2031 // the aarch64 load linked/store conditional instructions do not 2032 // accept an offset. so, unlike x86, we must provide a plain register 2033 // to identify the memory word to be compared/exchanged rather than a 2034 // register+offset Address. 2035 2036 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 2037 Label &succeed, Label *fail) { 2038 // oldv holds comparison value 2039 // newv holds value to write in exchange 2040 // addr identifies memory word to compare against/update 2041 // tmp returns 0/1 for success/failure 2042 Label retry_load, nope; 2043 2044 bind(retry_load); 2045 // flush and load exclusive from the memory location 2046 // and fail if it is not what we expect 2047 ldaxr(tmp, addr); 2048 cmp(tmp, oldv); 2049 br(Assembler::NE, nope); 2050 // if we store+flush with no intervening write tmp will be zero 2051 stlxr(tmp, newv, addr); 2052 cbzw(tmp, succeed); 2053 // retry so we only ever return after a load fails to compare 2054 // ensures we don't return a stale value after a failed write. 
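// (C-like sketch of the loop, ignoring acquire/release ordering:
//   do { tmp = *addr; if (tmp != oldv) goto nope; } while (stlxr fails);
//   goto succeed;
// where "stlxr fails" stands for the store-exclusive writing a non-zero
// status into tmp.)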
2055 b(retry_load); 2056 // if the memory word differs we return it in oldv and signal a fail 2057 bind(nope); 2058 membar(AnyAny); 2059 mov(oldv, tmp); 2060 if (fail) 2061 b(*fail); 2062 } 2063 2064 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp, 2065 Label &succeed, Label *fail) { 2066 // oldv holds comparison value 2067 // newv holds value to write in exchange 2068 // addr identifies memory word to compare against/update 2069 // tmp returns 0/1 for success/failure 2070 Label retry_load, nope; 2071 2072 bind(retry_load); 2073 // flush and load exclusive from the memory location 2074 // and fail if it is not what we expect 2075 ldaxrw(tmp, addr); 2076 cmp(tmp, oldv); 2077 br(Assembler::NE, nope); 2078 // if we store+flush with no intervening write tmp will be zero 2079 stlxrw(tmp, newv, addr); 2080 cbzw(tmp, succeed); 2081 // retry so we only ever return after a load fails to compare 2082 // ensures we don't return a stale value after a failed write. 2083 b(retry_load); 2084 // if the memory word differs we return it in oldv and signal a fail 2085 bind(nope); 2086 membar(AnyAny); 2087 mov(oldv, tmp); 2088 if (fail) 2089 b(*fail); 2090 } 2091 2092 static bool different(Register a, RegisterOrConstant b, Register c) { 2093 if (b.is_constant()) 2094 return a != c; 2095 else 2096 return a != b.as_register() && a != c && b.as_register() != c; 2097 } 2098 2099 #define ATOMIC_OP(LDXR, OP, STXR) \ 2100 void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \ 2101 Register result = rscratch2; \ 2102 if (prev->is_valid()) \ 2103 result = different(prev, incr, addr) ? prev : rscratch2; \ 2104 \ 2105 Label retry_load; \ 2106 bind(retry_load); \ 2107 LDXR(result, addr); \ 2108 OP(rscratch1, result, incr); \ 2109 STXR(rscratch1, rscratch1, addr); \ 2110 cbnzw(rscratch1, retry_load); \ 2111 if (prev->is_valid() && prev != result) \ 2112 mov(prev, result); \ 2113 } 2114 2115 ATOMIC_OP(ldxr, add, stxr) 2116 ATOMIC_OP(ldxrw, addw, stxrw) 2117 2118 #undef ATOMIC_OP 2119 2120 #define ATOMIC_XCHG(OP, LDXR, STXR) \ 2121 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 2122 Register result = rscratch2; \ 2123 if (prev->is_valid()) \ 2124 result = different(prev, newv, addr) ? 
prev : rscratch2; \ 2125 \ 2126 Label retry_load; \ 2127 bind(retry_load); \ 2128 LDXR(result, addr); \ 2129 STXR(rscratch1, newv, addr); \ 2130 cbnzw(rscratch1, retry_load); \ 2131 if (prev->is_valid() && prev != result) \ 2132 mov(prev, result); \ 2133 } 2134 2135 ATOMIC_XCHG(xchg, ldxr, stxr) 2136 ATOMIC_XCHG(xchgw, ldxrw, stxrw) 2137 2138 #undef ATOMIC_XCHG 2139 2140 void MacroAssembler::incr_allocated_bytes(Register thread, 2141 Register var_size_in_bytes, 2142 int con_size_in_bytes, 2143 Register t1) { 2144 if (!thread->is_valid()) { 2145 thread = rthread; 2146 } 2147 assert(t1->is_valid(), "need temp reg"); 2148 2149 ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset()))); 2150 if (var_size_in_bytes->is_valid()) { 2151 add(t1, t1, var_size_in_bytes); 2152 } else { 2153 add(t1, t1, con_size_in_bytes); 2154 } 2155 str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset()))); 2156 } 2157 2158 #ifndef PRODUCT 2159 extern "C" void findpc(intptr_t x); 2160 #endif 2161 2162 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) 2163 { 2164 // In order to get locks to work, we need to fake an in_VM state 2165 if (ShowMessageBoxOnError ) { 2166 JavaThread* thread = JavaThread::current(); 2167 JavaThreadState saved_state = thread->thread_state(); 2168 thread->set_thread_state(_thread_in_vm); 2169 #ifndef PRODUCT 2170 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 2171 ttyLocker ttyl; 2172 BytecodeCounter::print(); 2173 } 2174 #endif 2175 if (os::message_box(msg, "Execution stopped, print registers?")) { 2176 ttyLocker ttyl; 2177 tty->print_cr(" pc = 0x%016lx", pc); 2178 #ifndef PRODUCT 2179 tty->cr(); 2180 findpc(pc); 2181 tty->cr(); 2182 #endif 2183 tty->print_cr(" r0 = 0x%016lx", regs[0]); 2184 tty->print_cr(" r1 = 0x%016lx", regs[1]); 2185 tty->print_cr(" r2 = 0x%016lx", regs[2]); 2186 tty->print_cr(" r3 = 0x%016lx", regs[3]); 2187 tty->print_cr(" r4 = 0x%016lx", regs[4]); 2188 tty->print_cr(" r5 = 0x%016lx", regs[5]); 2189 tty->print_cr(" r6 = 0x%016lx", regs[6]); 2190 tty->print_cr(" r7 = 0x%016lx", regs[7]); 2191 tty->print_cr(" r8 = 0x%016lx", regs[8]); 2192 tty->print_cr(" r9 = 0x%016lx", regs[9]); 2193 tty->print_cr("r10 = 0x%016lx", regs[10]); 2194 tty->print_cr("r11 = 0x%016lx", regs[11]); 2195 tty->print_cr("r12 = 0x%016lx", regs[12]); 2196 tty->print_cr("r13 = 0x%016lx", regs[13]); 2197 tty->print_cr("r14 = 0x%016lx", regs[14]); 2198 tty->print_cr("r15 = 0x%016lx", regs[15]); 2199 tty->print_cr("r16 = 0x%016lx", regs[16]); 2200 tty->print_cr("r17 = 0x%016lx", regs[17]); 2201 tty->print_cr("r18 = 0x%016lx", regs[18]); 2202 tty->print_cr("r19 = 0x%016lx", regs[19]); 2203 tty->print_cr("r20 = 0x%016lx", regs[20]); 2204 tty->print_cr("r21 = 0x%016lx", regs[21]); 2205 tty->print_cr("r22 = 0x%016lx", regs[22]); 2206 tty->print_cr("r23 = 0x%016lx", regs[23]); 2207 tty->print_cr("r24 = 0x%016lx", regs[24]); 2208 tty->print_cr("r25 = 0x%016lx", regs[25]); 2209 tty->print_cr("r26 = 0x%016lx", regs[26]); 2210 tty->print_cr("r27 = 0x%016lx", regs[27]); 2211 tty->print_cr("r28 = 0x%016lx", regs[28]); 2212 tty->print_cr("r30 = 0x%016lx", regs[30]); 2213 tty->print_cr("r31 = 0x%016lx", regs[31]); 2214 BREAKPOINT; 2215 } 2216 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); 2217 } else { 2218 ttyLocker ttyl; 2219 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", 2220 msg); 2221 assert(false, err_msg("DEBUG MESSAGE: %s", msg)); 2222 } 2223 } 2224 2225 #ifdef BUILTIN_SIM 2226 // routine to generate 
an x86 prolog for a stub function which 2227 // bootstraps into the generated ARM code which directly follows the 2228 // stub 2229 // 2230 // the argument encodes the number of general and fp registers 2231 // passed by the caller and the calling convention (currently just 2232 // the number of general registers and assumes C argument passing) 2233 2234 extern "C" { 2235 int aarch64_stub_prolog_size(); 2236 void aarch64_stub_prolog(); 2237 void aarch64_prolog(); 2238 } 2239 2240 void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, 2241 address *prolog_ptr) 2242 { 2243 int calltype = (((ret_type & 0x3) << 8) | 2244 ((fp_arg_count & 0xf) << 4) | 2245 (gp_arg_count & 0xf)); 2246 2247 // the addresses for the x86 to ARM entry code we need to use 2248 address start = pc(); 2249 // printf("start = %lx\n", start); 2250 int byteCount = aarch64_stub_prolog_size(); 2251 // printf("byteCount = %x\n", byteCount); 2252 int instructionCount = (byteCount + 3)/ 4; 2253 // printf("instructionCount = %x\n", instructionCount); 2254 for (int i = 0; i < instructionCount; i++) { 2255 nop(); 2256 } 2257 2258 memcpy(start, (void*)aarch64_stub_prolog, byteCount); 2259 2260 // write the address of the setup routine and the call format at the 2261 // end of the copied code 2262 u_int64_t *patch_end = (u_int64_t *)(start + byteCount); 2263 if (prolog_ptr) 2264 patch_end[-2] = (u_int64_t)prolog_ptr; 2265 patch_end[-1] = calltype; 2266 } 2267 #endif 2268 2269 void MacroAssembler::push_CPU_state() { 2270 push(0x3fffffff, sp); // integer registers except lr & sp 2271 2272 for (int i = 30; i >= 0; i -= 2) 2273 stpd(as_FloatRegister(i), as_FloatRegister(i+1), 2274 Address(pre(sp, -2 * wordSize))); 2275 } 2276 2277 void MacroAssembler::pop_CPU_state() { 2278 for (int i = 0; i < 32; i += 2) 2279 ldpd(as_FloatRegister(i), as_FloatRegister(i+1), 2280 Address(post(sp, 2 * wordSize))); 2281 2282 pop(0x3fffffff, sp); // integer registers except lr & sp 2283 } 2284 2285 /** 2286 * Helpers for multiply_to_len(). 2287 */ 2288 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, 2289 Register src1, Register src2) { 2290 adds(dest_lo, dest_lo, src1); 2291 adc(dest_hi, dest_hi, zr); 2292 adds(dest_lo, dest_lo, src2); 2293 adc(final_dest_hi, dest_hi, zr); 2294 } 2295 2296 // Generate an address from (r + r1 extend offset). "size" is the 2297 // size of the operand. The result may be in rscratch2. 2298 Address MacroAssembler::offsetted_address(Register r, Register r1, 2299 Address::extend ext, int offset, int size) { 2300 if (offset || (ext.shift() % size != 0)) { 2301 lea(rscratch2, Address(r, r1, ext)); 2302 return Address(rscratch2, offset); 2303 } else { 2304 return Address(r, r1, ext); 2305 } 2306 } 2307 2308 /** 2309 * Multiply 64 bit by 64 bit first loop. 
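 * The 32-bit digits of x and y are stored most-significant first, so a pair
 * of digits is loaded as one 64-bit word and its halves are swapped with a
 * ror of 32 before being used as a single 64-bit limb.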
2310 */ 2311 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 2312 Register y, Register y_idx, Register z, 2313 Register carry, Register product, 2314 Register idx, Register kdx) { 2315 // 2316 // jlong carry, x[], y[], z[]; 2317 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 2318 // huge_128 product = y[idx] * x[xstart] + carry; 2319 // z[kdx] = (jlong)product; 2320 // carry = (jlong)(product >>> 64); 2321 // } 2322 // z[xstart] = carry; 2323 // 2324 2325 Label L_first_loop, L_first_loop_exit; 2326 Label L_one_x, L_one_y, L_multiply; 2327 2328 subsw(xstart, xstart, 1); 2329 br(Assembler::MI, L_one_x); 2330 2331 lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt))); 2332 ldr(x_xstart, Address(rscratch1)); 2333 ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian 2334 2335 bind(L_first_loop); 2336 subsw(idx, idx, 1); 2337 br(Assembler::MI, L_first_loop_exit); 2338 subsw(idx, idx, 1); 2339 br(Assembler::MI, L_one_y); 2340 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2341 ldr(y_idx, Address(rscratch1)); 2342 ror(y_idx, y_idx, 32); // convert big-endian to little-endian 2343 bind(L_multiply); 2344 2345 // AArch64 has a multiply-accumulate instruction that we can't use 2346 // here because it has no way to process carries, so we have to use 2347 // separate add and adc instructions. Bah. 2348 umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product 2349 mul(product, x_xstart, y_idx); 2350 adds(product, product, carry); 2351 adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product 2352 2353 subw(kdx, kdx, 2); 2354 ror(product, product, 32); // back to big-endian 2355 str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong)); 2356 2357 b(L_first_loop); 2358 2359 bind(L_one_y); 2360 ldrw(y_idx, Address(y, 0)); 2361 b(L_multiply); 2362 2363 bind(L_one_x); 2364 ldrw(x_xstart, Address(x, 0)); 2365 b(L_first_loop); 2366 2367 bind(L_first_loop_exit); 2368 } 2369 2370 /** 2371 * Multiply 128 bit by 128 bit. Unrolled inner loop. 
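 * Each iteration consumes four 32-bit digits of y (two 64-bit limbs loaded
 * with a single ldp) and produces four digits of z; the tail code below the
 * loop handles a remaining count of 1, 2 or 3 digits.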
2372 * 2373 */ 2374 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 2375 Register carry, Register carry2, 2376 Register idx, Register jdx, 2377 Register yz_idx1, Register yz_idx2, 2378 Register tmp, Register tmp3, Register tmp4, 2379 Register tmp6, Register product_hi) { 2380 2381 // jlong carry, x[], y[], z[]; 2382 // int kdx = ystart+1; 2383 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 2384 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 2385 // jlong carry2 = (jlong)(tmp3 >>> 64); 2386 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 2387 // carry = (jlong)(tmp4 >>> 64); 2388 // z[kdx+idx+1] = (jlong)tmp3; 2389 // z[kdx+idx] = (jlong)tmp4; 2390 // } 2391 // idx += 2; 2392 // if (idx > 0) { 2393 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 2394 // z[kdx+idx] = (jlong)yz_idx1; 2395 // carry = (jlong)(yz_idx1 >>> 64); 2396 // } 2397 // 2398 2399 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 2400 2401 lsrw(jdx, idx, 2); 2402 2403 bind(L_third_loop); 2404 2405 subsw(jdx, jdx, 1); 2406 br(Assembler::MI, L_third_loop_exit); 2407 subw(idx, idx, 4); 2408 2409 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2410 2411 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 2412 2413 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2414 2415 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 2416 ror(yz_idx2, yz_idx2, 32); 2417 2418 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 2419 2420 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2421 umulh(tmp4, product_hi, yz_idx1); 2422 2423 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 2424 ror(rscratch2, rscratch2, 32); 2425 2426 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 2427 umulh(carry2, product_hi, yz_idx2); 2428 2429 // propagate sum of both multiplications into carry:tmp4:tmp3 2430 adds(tmp3, tmp3, carry); 2431 adc(tmp4, tmp4, zr); 2432 adds(tmp3, tmp3, rscratch1); 2433 adcs(tmp4, tmp4, tmp); 2434 adc(carry, carry2, zr); 2435 adds(tmp4, tmp4, rscratch2); 2436 adc(carry, carry, zr); 2437 2438 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 2439 ror(tmp4, tmp4, 32); 2440 stp(tmp4, tmp3, Address(tmp6, 0)); 2441 2442 b(L_third_loop); 2443 bind (L_third_loop_exit); 2444 2445 andw (idx, idx, 0x3); 2446 cbz(idx, L_post_third_loop_done); 2447 2448 Label L_check_1; 2449 subsw(idx, idx, 2); 2450 br(Assembler::MI, L_check_1); 2451 2452 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2453 ldr(yz_idx1, Address(rscratch1, 0)); 2454 ror(yz_idx1, yz_idx1, 32); 2455 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2456 umulh(tmp4, product_hi, yz_idx1); 2457 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2458 ldr(yz_idx2, Address(rscratch1, 0)); 2459 ror(yz_idx2, yz_idx2, 32); 2460 2461 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 2462 2463 ror(tmp3, tmp3, 32); 2464 str(tmp3, Address(rscratch1, 0)); 2465 2466 bind (L_check_1); 2467 2468 andw (idx, idx, 0x1); 2469 subsw(idx, idx, 1); 2470 br(Assembler::MI, L_post_third_loop_done); 2471 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2472 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 2473 umulh(carry2, tmp4, product_hi); 2474 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2475 2476 add2_with_carry(carry2, tmp3, tmp4, carry); 2477 2478 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2479 
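// The extr below computes (carry2:tmp3) >> 32, i.e. it realigns the 64-bit
// carry to the 32-bit digit boundary after the single-digit tail step.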
extr(carry, carry2, tmp3, 32); 2480 2481 bind(L_post_third_loop_done); 2482 } 2483 2484 /** 2485 * Code for BigInteger::multiplyToLen() intrinsic. 2486 * 2487 * r0: x 2488 * r1: xlen 2489 * r2: y 2490 * r3: ylen 2491 * r4: z 2492 * r5: zlen 2493 * r10: tmp1 2494 * r11: tmp2 2495 * r12: tmp3 2496 * r13: tmp4 2497 * r14: tmp5 2498 * r15: tmp6 2499 * r16: tmp7 2500 * 2501 */ 2502 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, 2503 Register z, Register zlen, 2504 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 2505 Register tmp5, Register tmp6, Register product_hi) { 2506 2507 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6); 2508 2509 const Register idx = tmp1; 2510 const Register kdx = tmp2; 2511 const Register xstart = tmp3; 2512 2513 const Register y_idx = tmp4; 2514 const Register carry = tmp5; 2515 const Register product = xlen; 2516 const Register x_xstart = zlen; // reuse register 2517 2518 // First Loop. 2519 // 2520 // final static long LONG_MASK = 0xffffffffL; 2521 // int xstart = xlen - 1; 2522 // int ystart = ylen - 1; 2523 // long carry = 0; 2524 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 2525 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 2526 // z[kdx] = (int)product; 2527 // carry = product >>> 32; 2528 // } 2529 // z[xstart] = (int)carry; 2530 // 2531 2532 movw(idx, ylen); // idx = ylen; 2533 movw(kdx, zlen); // kdx = xlen+ylen; 2534 mov(carry, zr); // carry = 0; 2535 2536 Label L_done; 2537 2538 movw(xstart, xlen); 2539 subsw(xstart, xstart, 1); 2540 br(Assembler::MI, L_done); 2541 2542 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 2543 2544 Label L_second_loop; 2545 cbzw(kdx, L_second_loop); 2546 2547 Label L_carry; 2548 subw(kdx, kdx, 1); 2549 cbzw(kdx, L_carry); 2550 2551 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 2552 lsr(carry, carry, 32); 2553 subw(kdx, kdx, 1); 2554 2555 bind(L_carry); 2556 strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt))); 2557 2558 // Second and third (nested) loops. 
2559 // 2560 // for (int i = xstart-1; i >= 0; i--) { // Second loop 2561 // carry = 0; 2562 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 2563 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 2564 // (z[k] & LONG_MASK) + carry; 2565 // z[k] = (int)product; 2566 // carry = product >>> 32; 2567 // } 2568 // z[i] = (int)carry; 2569 // } 2570 // 2571 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 2572 2573 const Register jdx = tmp1; 2574 2575 bind(L_second_loop); 2576 mov(carry, zr); // carry = 0; 2577 movw(jdx, ylen); // j = ystart+1 2578 2579 subsw(xstart, xstart, 1); // i = xstart-1; 2580 br(Assembler::MI, L_done); 2581 2582 str(z, Address(pre(sp, -4 * wordSize))); 2583 2584 Label L_last_x; 2585 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 2586 subsw(xstart, xstart, 1); // i = xstart-1; 2587 br(Assembler::MI, L_last_x); 2588 2589 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 2590 ldr(product_hi, Address(rscratch1)); 2591 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 2592 2593 Label L_third_loop_prologue; 2594 bind(L_third_loop_prologue); 2595 2596 str(ylen, Address(sp, wordSize)); 2597 stp(x, xstart, Address(sp, 2 * wordSize)); 2598 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 2599 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 2600 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 2601 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 2602 2603 addw(tmp3, xlen, 1); 2604 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2605 subsw(tmp3, tmp3, 1); 2606 br(Assembler::MI, L_done); 2607 2608 lsr(carry, carry, 32); 2609 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2610 b(L_second_loop); 2611 2612 // Next infrequent code is moved outside loops. 2613 bind(L_last_x); 2614 ldrw(product_hi, Address(x, 0)); 2615 b(L_third_loop_prologue); 2616 2617 bind(L_done); 2618 } 2619 2620 /** 2621 * Emits code to update CRC-32 with a byte value according to constants in table 2622 * 2623 * @param [in,out]crc Register containing the crc. 2624 * @param [in]val Register containing the byte to fold into the CRC. 2625 * @param [in]table Register containing the table of crc constants. 2626 * 2627 * uint32_t crc; 2628 * val = crc_table[(val ^ crc) & 0xFF]; 2629 * crc = val ^ (crc >> 8); 2630 * 2631 */ 2632 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 2633 eor(val, val, crc); 2634 andr(val, val, 0xff); 2635 ldrw(val, Address(table, val, Address::lsl(2))); 2636 eor(crc, val, crc, Assembler::LSR, 8); 2637 } 2638 2639 /** 2640 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 2641 * 2642 * @param [in,out]crc Register containing the crc. 2643 * @param [in]v Register containing the 32-bit to fold into the CRC. 2644 * @param [in]table0 Register containing table 0 of crc constants. 2645 * @param [in]table1 Register containing table 1 of crc constants. 2646 * @param [in]table2 Register containing table 2 of crc constants. 2647 * @param [in]table3 Register containing table 3 of crc constants. 2648 * 2649 * uint32_t crc; 2650 * v = crc ^ v 2651 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 2652 * 2653 */ 2654 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 2655 Register table0, Register table1, Register table2, Register table3, 2656 bool upper) { 2657 eor(v, crc, v, upper ? 
LSR:LSL, upper ? 32:0); 2658 uxtb(tmp, v); 2659 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 2660 ubfx(tmp, v, 8, 8); 2661 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 2662 eor(crc, crc, tmp); 2663 ubfx(tmp, v, 16, 8); 2664 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 2665 eor(crc, crc, tmp); 2666 ubfx(tmp, v, 24, 8); 2667 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 2668 eor(crc, crc, tmp); 2669 } 2670 2671 /** 2672 * @param crc register containing existing CRC (32-bit) 2673 * @param buf register pointing to input byte buffer (byte*) 2674 * @param len register containing number of bytes 2675 * @param table register that will contain address of CRC table 2676 * @param tmp scratch register 2677 */ 2678 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 2679 Register table0, Register table1, Register table2, Register table3, 2680 Register tmp, Register tmp2, Register tmp3) { 2681 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 2682 unsigned long offset; 2683 2684 ornw(crc, zr, crc); 2685 2686 if (UseCRC32) { 2687 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2688 2689 subs(len, len, 64); 2690 br(Assembler::GE, CRC_by64_loop); 2691 adds(len, len, 64-4); 2692 br(Assembler::GE, CRC_by4_loop); 2693 adds(len, len, 4); 2694 br(Assembler::GT, CRC_by1_loop); 2695 b(L_exit); 2696 2697 BIND(CRC_by4_loop); 2698 ldrw(tmp, Address(post(buf, 4))); 2699 subs(len, len, 4); 2700 crc32w(crc, crc, tmp); 2701 br(Assembler::GE, CRC_by4_loop); 2702 adds(len, len, 4); 2703 br(Assembler::LE, L_exit); 2704 BIND(CRC_by1_loop); 2705 ldrb(tmp, Address(post(buf, 1))); 2706 subs(len, len, 1); 2707 crc32b(crc, crc, tmp); 2708 br(Assembler::GT, CRC_by1_loop); 2709 b(L_exit); 2710 2711 align(CodeEntryAlignment); 2712 BIND(CRC_by64_loop); 2713 subs(len, len, 64); 2714 ldp(tmp, tmp3, Address(post(buf, 16))); 2715 crc32x(crc, crc, tmp); 2716 crc32x(crc, crc, tmp3); 2717 ldp(tmp, tmp3, Address(post(buf, 16))); 2718 crc32x(crc, crc, tmp); 2719 crc32x(crc, crc, tmp3); 2720 ldp(tmp, tmp3, Address(post(buf, 16))); 2721 crc32x(crc, crc, tmp); 2722 crc32x(crc, crc, tmp3); 2723 ldp(tmp, tmp3, Address(post(buf, 16))); 2724 crc32x(crc, crc, tmp); 2725 crc32x(crc, crc, tmp3); 2726 br(Assembler::GE, CRC_by64_loop); 2727 adds(len, len, 64-4); 2728 br(Assembler::GE, CRC_by4_loop); 2729 adds(len, len, 4); 2730 br(Assembler::GT, CRC_by1_loop); 2731 BIND(L_exit); 2732 ornw(crc, zr, crc); 2733 return; 2734 } 2735 2736 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2737 if (offset) add(table0, table0, offset); 2738 add(table1, table0, 1*256*sizeof(juint)); 2739 add(table2, table0, 2*256*sizeof(juint)); 2740 add(table3, table0, 3*256*sizeof(juint)); 2741 2742 if (UseNeon) { 2743 cmp(len, 64); 2744 br(Assembler::LT, L_by16); 2745 eor(v16, T16B, v16, v16); 2746 2747 Label L_fold; 2748 2749 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 2750 2751 ld1(v0, v1, T2D, post(buf, 32)); 2752 ld1r(v4, T2D, post(tmp, 8)); 2753 ld1r(v5, T2D, post(tmp, 8)); 2754 ld1r(v6, T2D, post(tmp, 8)); 2755 ld1r(v7, T2D, post(tmp, 8)); 2756 mov(v16, T4S, 0, crc); 2757 2758 eor(v0, T16B, v0, v16); 2759 sub(len, len, 64); 2760 2761 BIND(L_fold); 2762 pmull(v22, T8H, v0, v5, T8B); 2763 pmull(v20, T8H, v0, v7, T8B); 2764 pmull(v23, T8H, v0, v4, T8B); 2765 pmull(v21, T8H, v0, v6, T8B); 2766 2767 pmull2(v18, T8H, v0, v5, T16B); 2768 pmull2(v16, T8H, v0, v7, T16B); 2769 pmull2(v19, T8H, v0, v4, T16B); 2770 pmull2(v17, T8H, v0, v6, T16B); 2771 2772 uzp1(v24, v20, 
v22, T8H); 2773 uzp2(v25, v20, v22, T8H); 2774 eor(v20, T16B, v24, v25); 2775 2776 uzp1(v26, v16, v18, T8H); 2777 uzp2(v27, v16, v18, T8H); 2778 eor(v16, T16B, v26, v27); 2779 2780 ushll2(v22, T4S, v20, T8H, 8); 2781 ushll(v20, T4S, v20, T4H, 8); 2782 2783 ushll2(v18, T4S, v16, T8H, 8); 2784 ushll(v16, T4S, v16, T4H, 8); 2785 2786 eor(v22, T16B, v23, v22); 2787 eor(v18, T16B, v19, v18); 2788 eor(v20, T16B, v21, v20); 2789 eor(v16, T16B, v17, v16); 2790 2791 uzp1(v17, v16, v20, T2D); 2792 uzp2(v21, v16, v20, T2D); 2793 eor(v17, T16B, v17, v21); 2794 2795 ushll2(v20, T2D, v17, T4S, 16); 2796 ushll(v16, T2D, v17, T2S, 16); 2797 2798 eor(v20, T16B, v20, v22); 2799 eor(v16, T16B, v16, v18); 2800 2801 uzp1(v17, v20, v16, T2D); 2802 uzp2(v21, v20, v16, T2D); 2803 eor(v28, T16B, v17, v21); 2804 2805 pmull(v22, T8H, v1, v5, T8B); 2806 pmull(v20, T8H, v1, v7, T8B); 2807 pmull(v23, T8H, v1, v4, T8B); 2808 pmull(v21, T8H, v1, v6, T8B); 2809 2810 pmull2(v18, T8H, v1, v5, T16B); 2811 pmull2(v16, T8H, v1, v7, T16B); 2812 pmull2(v19, T8H, v1, v4, T16B); 2813 pmull2(v17, T8H, v1, v6, T16B); 2814 2815 ld1(v0, v1, T2D, post(buf, 32)); 2816 2817 uzp1(v24, v20, v22, T8H); 2818 uzp2(v25, v20, v22, T8H); 2819 eor(v20, T16B, v24, v25); 2820 2821 uzp1(v26, v16, v18, T8H); 2822 uzp2(v27, v16, v18, T8H); 2823 eor(v16, T16B, v26, v27); 2824 2825 ushll2(v22, T4S, v20, T8H, 8); 2826 ushll(v20, T4S, v20, T4H, 8); 2827 2828 ushll2(v18, T4S, v16, T8H, 8); 2829 ushll(v16, T4S, v16, T4H, 8); 2830 2831 eor(v22, T16B, v23, v22); 2832 eor(v18, T16B, v19, v18); 2833 eor(v20, T16B, v21, v20); 2834 eor(v16, T16B, v17, v16); 2835 2836 uzp1(v17, v16, v20, T2D); 2837 uzp2(v21, v16, v20, T2D); 2838 eor(v16, T16B, v17, v21); 2839 2840 ushll2(v20, T2D, v16, T4S, 16); 2841 ushll(v16, T2D, v16, T2S, 16); 2842 2843 eor(v20, T16B, v22, v20); 2844 eor(v16, T16B, v16, v18); 2845 2846 uzp1(v17, v20, v16, T2D); 2847 uzp2(v21, v20, v16, T2D); 2848 eor(v20, T16B, v17, v21); 2849 2850 shl(v16, T2D, v28, 1); 2851 shl(v17, T2D, v20, 1); 2852 2853 eor(v0, T16B, v0, v16); 2854 eor(v1, T16B, v1, v17); 2855 2856 subs(len, len, 32); 2857 br(Assembler::GE, L_fold); 2858 2859 mov(crc, 0); 2860 mov(tmp, v0, T1D, 0); 2861 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2862 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2863 mov(tmp, v0, T1D, 1); 2864 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2865 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2866 mov(tmp, v1, T1D, 0); 2867 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2868 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2869 mov(tmp, v1, T1D, 1); 2870 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2871 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2872 2873 add(len, len, 32); 2874 } 2875 2876 BIND(L_by16); 2877 subs(len, len, 16); 2878 br(Assembler::GE, L_by16_loop); 2879 adds(len, len, 16-4); 2880 br(Assembler::GE, L_by4_loop); 2881 adds(len, len, 4); 2882 br(Assembler::GT, L_by1_loop); 2883 b(L_exit); 2884 2885 BIND(L_by4_loop); 2886 ldrw(tmp, Address(post(buf, 4))); 2887 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 2888 subs(len, len, 4); 2889 br(Assembler::GE, L_by4_loop); 2890 adds(len, len, 4); 2891 br(Assembler::LE, L_exit); 2892 BIND(L_by1_loop); 2893 subs(len, len, 1); 2894 ldrb(tmp, Address(post(buf, 1))); 2895 update_byte_crc32(crc, tmp, table0); 2896 
br(Assembler::GT, L_by1_loop); 2897 b(L_exit); 2898 2899 align(CodeEntryAlignment); 2900 BIND(L_by16_loop); 2901 subs(len, len, 16); 2902 ldp(tmp, tmp3, Address(post(buf, 16))); 2903 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2904 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2905 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 2906 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 2907 br(Assembler::GE, L_by16_loop); 2908 adds(len, len, 16-4); 2909 br(Assembler::GE, L_by4_loop); 2910 adds(len, len, 4); 2911 br(Assembler::GT, L_by1_loop); 2912 BIND(L_exit); 2913 ornw(crc, zr, crc); 2914 } 2915 2916 /** 2917 * @param crc register containing existing CRC (32-bit) 2918 * @param buf register pointing to input byte buffer (byte*) 2919 * @param len register containing number of bytes 2920 * @param table register that will contain address of CRC table 2921 * @param tmp scratch register 2922 */ 2923 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 2924 Register table0, Register table1, Register table2, Register table3, 2925 Register tmp, Register tmp2, Register tmp3) { 2926 Label L_exit; 2927 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2928 2929 subs(len, len, 64); 2930 br(Assembler::GE, CRC_by64_loop); 2931 adds(len, len, 64-4); 2932 br(Assembler::GE, CRC_by4_loop); 2933 adds(len, len, 4); 2934 br(Assembler::GT, CRC_by1_loop); 2935 b(L_exit); 2936 2937 BIND(CRC_by4_loop); 2938 ldrw(tmp, Address(post(buf, 4))); 2939 subs(len, len, 4); 2940 crc32cw(crc, crc, tmp); 2941 br(Assembler::GE, CRC_by4_loop); 2942 adds(len, len, 4); 2943 br(Assembler::LE, L_exit); 2944 BIND(CRC_by1_loop); 2945 ldrb(tmp, Address(post(buf, 1))); 2946 subs(len, len, 1); 2947 crc32cb(crc, crc, tmp); 2948 br(Assembler::GT, CRC_by1_loop); 2949 b(L_exit); 2950 2951 align(CodeEntryAlignment); 2952 BIND(CRC_by64_loop); 2953 subs(len, len, 64); 2954 ldp(tmp, tmp3, Address(post(buf, 16))); 2955 crc32cx(crc, crc, tmp); 2956 crc32cx(crc, crc, tmp3); 2957 ldp(tmp, tmp3, Address(post(buf, 16))); 2958 crc32cx(crc, crc, tmp); 2959 crc32cx(crc, crc, tmp3); 2960 ldp(tmp, tmp3, Address(post(buf, 16))); 2961 crc32cx(crc, crc, tmp); 2962 crc32cx(crc, crc, tmp3); 2963 ldp(tmp, tmp3, Address(post(buf, 16))); 2964 crc32cx(crc, crc, tmp); 2965 crc32cx(crc, crc, tmp3); 2966 br(Assembler::GE, CRC_by64_loop); 2967 adds(len, len, 64-4); 2968 br(Assembler::GE, CRC_by4_loop); 2969 adds(len, len, 4); 2970 br(Assembler::GT, CRC_by1_loop); 2971 BIND(L_exit); 2972 return; 2973 } 2974 2975 SkipIfEqual::SkipIfEqual( 2976 MacroAssembler* masm, const bool* flag_addr, bool value) { 2977 _masm = masm; 2978 unsigned long offset; 2979 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 2980 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 2981 _masm->cbzw(rscratch1, _label); 2982 } 2983 2984 SkipIfEqual::~SkipIfEqual() { 2985 _masm->bind(_label); 2986 } 2987 2988 void MacroAssembler::cmpptr(Register src1, Address src2) { 2989 unsigned long offset; 2990 adrp(rscratch1, src2, offset); 2991 ldr(rscratch1, Address(rscratch1, offset)); 2992 cmp(src1, rscratch1); 2993 } 2994 2995 void MacroAssembler::store_check(Register obj, Address dst) { 2996 store_check(obj); 2997 } 2998 2999 void MacroAssembler::store_check(Register obj) { 3000 // Does a store check for the oop in register obj. The content of 3001 // register obj is destroyed afterwards. 
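// (Roughly, with obj clobbered to hold the card index:
//   byte_map_base[obj >> card_shift] = dirty_card_val();
// dirty_card_val() is asserted to be 0 below, so a strb of zr suffices.)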
3002 3003 BarrierSet* bs = Universe::heap()->barrier_set(); 3004 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); 3005 3006 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); 3007 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 3008 3009 lsr(obj, obj, CardTableModRefBS::card_shift); 3010 3011 assert(CardTableModRefBS::dirty_card_val() == 0, "must be"); 3012 3013 { 3014 ExternalAddress cardtable((address) ct->byte_map_base); 3015 unsigned long offset; 3016 adrp(rscratch1, cardtable, offset); 3017 assert(offset == 0, "byte_map_base is misaligned"); 3018 } 3019 3020 if (UseCondCardMark) { 3021 Label L_already_dirty; 3022 ldrb(rscratch2, Address(obj, rscratch1)); 3023 cbz(rscratch2, L_already_dirty); 3024 strb(zr, Address(obj, rscratch1)); 3025 bind(L_already_dirty); 3026 } else { 3027 strb(zr, Address(obj, rscratch1)); 3028 } 3029 } 3030 3031 void MacroAssembler::load_klass(Register dst, Register src) { 3032 if (UseCompressedClassPointers) { 3033 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 3034 decode_klass_not_null(dst); 3035 } else { 3036 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 3037 } 3038 } 3039 3040 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { 3041 if (UseCompressedClassPointers) { 3042 ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 3043 if (Universe::narrow_klass_base() == NULL) { 3044 cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift()); 3045 return; 3046 } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3047 && Universe::narrow_klass_shift() == 0) { 3048 // Only the bottom 32 bits matter 3049 cmpw(trial_klass, tmp); 3050 return; 3051 } 3052 decode_klass_not_null(tmp); 3053 } else { 3054 ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); 3055 } 3056 cmp(trial_klass, tmp); 3057 } 3058 3059 void MacroAssembler::load_prototype_header(Register dst, Register src) { 3060 load_klass(dst, src); 3061 ldr(dst, Address(dst, Klass::prototype_header_offset())); 3062 } 3063 3064 void MacroAssembler::store_klass(Register dst, Register src) { 3065 // FIXME: Should this be a store release? concurrent GCs assume 3066 // klass length is valid if klass field is not null. 3067 if (UseCompressedClassPointers) { 3068 encode_klass_not_null(src); 3069 strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); 3070 } else { 3071 str(src, Address(dst, oopDesc::klass_offset_in_bytes())); 3072 } 3073 } 3074 3075 void MacroAssembler::store_klass_gap(Register dst, Register src) { 3076 if (UseCompressedClassPointers) { 3077 // Store to klass gap in destination 3078 strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); 3079 } 3080 } 3081 3082 // Algorithm must match oop.inline.hpp encode_heap_oop. 3083 void MacroAssembler::encode_heap_oop(Register d, Register s) { 3084 #ifdef ASSERT 3085 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 3086 #endif 3087 verify_oop(s, "broken oop in encode_heap_oop"); 3088 if (Universe::narrow_oop_base() == NULL) { 3089 if (Universe::narrow_oop_shift() != 0) { 3090 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3091 lsr(d, s, LogMinObjAlignmentInBytes); 3092 } else { 3093 mov(d, s); 3094 } 3095 } else { 3096 subs(d, s, rheapbase); 3097 csel(d, d, zr, Assembler::HS); 3098 lsr(d, d, LogMinObjAlignmentInBytes); 3099 3100 /* Old algorithm: is this any worse? 
3101 Label nonnull; 3102 cbnz(r, nonnull); 3103 sub(r, r, rheapbase); 3104 bind(nonnull); 3105 lsr(r, r, LogMinObjAlignmentInBytes); 3106 */ 3107 } 3108 } 3109 3110 void MacroAssembler::encode_heap_oop_not_null(Register r) { 3111 #ifdef ASSERT 3112 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 3113 if (CheckCompressedOops) { 3114 Label ok; 3115 cbnz(r, ok); 3116 stop("null oop passed to encode_heap_oop_not_null"); 3117 bind(ok); 3118 } 3119 #endif 3120 verify_oop(r, "broken oop in encode_heap_oop_not_null"); 3121 if (Universe::narrow_oop_base() != NULL) { 3122 sub(r, r, rheapbase); 3123 } 3124 if (Universe::narrow_oop_shift() != 0) { 3125 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3126 lsr(r, r, LogMinObjAlignmentInBytes); 3127 } 3128 } 3129 3130 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 3131 #ifdef ASSERT 3132 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 3133 if (CheckCompressedOops) { 3134 Label ok; 3135 cbnz(src, ok); 3136 stop("null oop passed to encode_heap_oop_not_null2"); 3137 bind(ok); 3138 } 3139 #endif 3140 verify_oop(src, "broken oop in encode_heap_oop_not_null2"); 3141 3142 Register data = src; 3143 if (Universe::narrow_oop_base() != NULL) { 3144 sub(dst, src, rheapbase); 3145 data = dst; 3146 } 3147 if (Universe::narrow_oop_shift() != 0) { 3148 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3149 lsr(dst, data, LogMinObjAlignmentInBytes); 3150 data = dst; 3151 } 3152 if (data == src) 3153 mov(dst, src); 3154 } 3155 3156 void MacroAssembler::decode_heap_oop(Register d, Register s) { 3157 #ifdef ASSERT 3158 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 3159 #endif 3160 if (Universe::narrow_oop_base() == NULL) { 3161 if (Universe::narrow_oop_shift() != 0 || d != s) { 3162 lsl(d, s, Universe::narrow_oop_shift()); 3163 } 3164 } else { 3165 Label done; 3166 if (d != s) 3167 mov(d, s); 3168 cbz(s, done); 3169 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 3170 bind(done); 3171 } 3172 verify_oop(d, "broken oop in decode_heap_oop"); 3173 } 3174 3175 void MacroAssembler::decode_heap_oop_not_null(Register r) { 3176 assert (UseCompressedOops, "should only be used for compressed headers"); 3177 assert (Universe::heap() != NULL, "java heap should be initialized"); 3178 // Cannot assert, unverified entry point counts instructions (see .ad file) 3179 // vtableStubs also counts instructions in pd_code_size_limit. 3180 // Also do not verify_oop as this is called by verify_oop. 3181 if (Universe::narrow_oop_shift() != 0) { 3182 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3183 if (Universe::narrow_oop_base() != NULL) { 3184 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3185 } else { 3186 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3187 } 3188 } else { 3189 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3190 } 3191 } 3192 3193 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 3194 assert (UseCompressedOops, "should only be used for compressed headers"); 3195 assert (Universe::heap() != NULL, "java heap should be initialized"); 3196 // Cannot assert, unverified entry point counts instructions (see .ad file) 3197 // vtableStubs also counts instructions in pd_code_size_limit. 3198 // Also do not verify_oop as this is called by verify_oop. 
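// (Decoding here is essentially dst = heap base + (src << shift); with a
// non-null narrow oop base, a narrow oop of zero would wrongly decode to
// rheapbase itself, which is why this variant may only be used on oops
// known to be non-null.)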
3199 if (Universe::narrow_oop_shift() != 0) { 3200 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3201 if (Universe::narrow_oop_base() != NULL) { 3202 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3203 } else { 3204 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3205 } 3206 } else { 3207 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3208 if (dst != src) { 3209 mov(dst, src); 3210 } 3211 } 3212 } 3213 3214 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3215 if (Universe::narrow_klass_base() == NULL) { 3216 if (Universe::narrow_klass_shift() != 0) { 3217 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3218 lsr(dst, src, LogKlassAlignmentInBytes); 3219 } else { 3220 if (dst != src) mov(dst, src); 3221 } 3222 return; 3223 } 3224 3225 if (use_XOR_for_compressed_class_base) { 3226 if (Universe::narrow_klass_shift() != 0) { 3227 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3228 lsr(dst, dst, LogKlassAlignmentInBytes); 3229 } else { 3230 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3231 } 3232 return; 3233 } 3234 3235 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3236 && Universe::narrow_klass_shift() == 0) { 3237 movw(dst, src); 3238 return; 3239 } 3240 3241 #ifdef ASSERT 3242 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 3243 #endif 3244 3245 Register rbase = dst; 3246 if (dst == src) rbase = rheapbase; 3247 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3248 sub(dst, src, rbase); 3249 if (Universe::narrow_klass_shift() != 0) { 3250 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3251 lsr(dst, dst, LogKlassAlignmentInBytes); 3252 } 3253 if (dst == src) reinit_heapbase(); 3254 } 3255 3256 void MacroAssembler::encode_klass_not_null(Register r) { 3257 encode_klass_not_null(r, r); 3258 } 3259 3260 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3261 Register rbase = dst; 3262 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3263 3264 if (Universe::narrow_klass_base() == NULL) { 3265 if (Universe::narrow_klass_shift() != 0) { 3266 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3267 lsl(dst, src, LogKlassAlignmentInBytes); 3268 } else { 3269 if (dst != src) mov(dst, src); 3270 } 3271 return; 3272 } 3273 3274 if (use_XOR_for_compressed_class_base) { 3275 if (Universe::narrow_klass_shift() != 0) { 3276 lsl(dst, src, LogKlassAlignmentInBytes); 3277 eor(dst, dst, (uint64_t)Universe::narrow_klass_base()); 3278 } else { 3279 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3280 } 3281 return; 3282 } 3283 3284 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3285 && Universe::narrow_klass_shift() == 0) { 3286 if (dst != src) 3287 movw(dst, src); 3288 movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32); 3289 return; 3290 } 3291 3292 // Cannot assert, unverified entry point counts instructions (see .ad file) 3293 // vtableStubs also counts instructions in pd_code_size_limit. 3294 // Also do not verify_oop as this is called by verify_oop. 
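// General case: materialize narrow_klass_base in a scratch register,
// reusing rheapbase when dst == src, add the (possibly shifted) narrow
// klass to it, and finally restore rheapbase via reinit_heapbase().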
3295 if (dst == src) rbase = rheapbase; 3296 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3297 if (Universe::narrow_klass_shift() != 0) { 3298 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3299 add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); 3300 } else { 3301 add(dst, rbase, src); 3302 } 3303 if (dst == src) reinit_heapbase(); 3304 } 3305 3306 void MacroAssembler::decode_klass_not_null(Register r) { 3307 decode_klass_not_null(r, r); 3308 } 3309 3310 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 3311 assert (UseCompressedOops, "should only be used for compressed oops"); 3312 assert (Universe::heap() != NULL, "java heap should be initialized"); 3313 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3314 3315 int oop_index = oop_recorder()->find_index(obj); 3316 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3317 3318 InstructionMark im(this); 3319 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3320 code_section()->relocate(inst_mark(), rspec); 3321 movz(dst, 0xDEAD, 16); 3322 movk(dst, 0xBEEF); 3323 } 3324 3325 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 3326 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3327 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3328 int index = oop_recorder()->find_index(k); 3329 assert(! Universe::heap()->is_in_reserved(k), "should not be an oop"); 3330 3331 InstructionMark im(this); 3332 RelocationHolder rspec = metadata_Relocation::spec(index); 3333 code_section()->relocate(inst_mark(), rspec); 3334 narrowKlass nk = Klass::encode_klass(k); 3335 movz(dst, (nk >> 16), 16); 3336 movk(dst, nk & 0xffff); 3337 } 3338 3339 void MacroAssembler::load_heap_oop(Register dst, Address src) 3340 { 3341 if (UseCompressedOops) { 3342 ldrw(dst, src); 3343 decode_heap_oop(dst); 3344 } else { 3345 ldr(dst, src); 3346 } 3347 } 3348 3349 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) 3350 { 3351 if (UseCompressedOops) { 3352 ldrw(dst, src); 3353 decode_heap_oop_not_null(dst); 3354 } else { 3355 ldr(dst, src); 3356 } 3357 } 3358 3359 void MacroAssembler::store_heap_oop(Address dst, Register src) { 3360 if (UseCompressedOops) { 3361 assert(!dst.uses(src), "not enough registers"); 3362 encode_heap_oop(src); 3363 strw(src, dst); 3364 } else 3365 str(src, dst); 3366 } 3367 3368 // Used for storing NULLs. 3369 void MacroAssembler::store_heap_oop_null(Address dst) { 3370 if (UseCompressedOops) { 3371 strw(zr, dst); 3372 } else 3373 str(zr, dst); 3374 } 3375 3376 #if INCLUDE_ALL_GCS 3377 void MacroAssembler::g1_write_barrier_pre(Register obj, 3378 Register pre_val, 3379 Register thread, 3380 Register tmp, 3381 bool tosca_live, 3382 bool expand_call) { 3383 // If expand_call is true then we expand the call_VM_leaf macro 3384 // directly to skip generating the check by 3385 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 
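// (This is the G1 SATB pre-barrier: while concurrent marking is active the
// value the field held before the store is recorded in the thread-local
// SATB queue; the runtime is called only when the queue is full, i.e. when
// its index has counted down to 0.)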

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg)
    assert_different_registers(obj, pre_val, tmp);

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    ldrb(tmp, in_progress);
  }
  cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ldr(tmp, index);            // tmp := *index_adr
  cbz(tmp, runtime);          // tmp == 0?
                              // If yes, goto runtime

  sub(tmp, tmp, wordSize);    // tmp := tmp - wordSize
  str(tmp, index);            // *index_adr := tmp
  ldr(rscratch1, buffer);
  add(tmp, tmp, rscratch1);   // tmp := tmp + *buffer_adr

  // Record the previous value
  str(pre_val, Address(tmp, 0));
  b(done);

  bind(runtime);
  // save the live input values
  push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
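  //
  // Both paths below end up in SharedRuntime::g1_wb_pre(pre_val, thread);
  // the expanded form simply marshals the two arguments itself and calls
  // call_VM_leaf_base directly, which is how it skips the last_sp check.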

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    pass_arg1(this, thread);
    pass_arg0(this, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  bind(done);
}

void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register thread,
                                           Register tmp,
                                           Register tmp2) {
  assert(thread == rthread, "must be");

  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  Label done;
  Label runtime;

  // Does store cross heap regions?

  eor(tmp, store_addr, new_val);
  lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
  cbz(tmp, done);

  // crosses regions, storing NULL?

  cbz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  const Register card_addr = tmp;

  lsr(card_addr, store_addr, CardTableModRefBS::card_shift);

  unsigned long offset;
  adrp(tmp2, cardtable, offset);

  // get the address of the card
  add(card_addr, card_addr, tmp2);
  ldrb(tmp2, Address(card_addr, offset));
  cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  br(Assembler::EQ, done);

  assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

  membar(Assembler::StoreLoad);

  ldrb(tmp2, Address(card_addr, offset));
  cbzw(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  strb(zr, Address(card_addr, offset));

  ldr(rscratch1, queue_index);
  cbz(rscratch1, runtime);
  sub(rscratch1, rscratch1, wordSize);
  str(rscratch1, queue_index);

  ldr(tmp2, buffer);
  str(card_addr, Address(tmp2, rscratch1));
  b(done);

  bind(runtime);
  // save the live input values
  push(store_addr->bit(true) | new_val->bit(true), sp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(store_addr->bit(true) | new_val->bit(true), sp);

  bind(done);
}

#endif // INCLUDE_ALL_GCS

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return Address((address)obj, rspec);
}

// Move an oop into a register.  immediate is true if we want
// immediate instructions, i.e. we are not going to patch this
// instruction while the code is being executed by another thread.  In
// that case we can use move immediates rather than the constant pool.
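//
// As an illustration only, the two shapes emitted below are roughly:
//
//   ldr  dst, [nearby constant pool slot holding obj]   // !immediate
//
// versus a movz/movk immediate sequence materializing the address:
//
//   mov  dst, #obj                                      // immediate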
void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  if (! immediate) {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr_constant(dst, Address(dummy, rspec));
  } else
    mov(dst, Address((address)obj, rspec));
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes));
  }
  ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  cmp(end, rscratch1);
  br(Assembler::HI, slow_case);

  // update the tlab top pointer
  str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Preserves r19 and r3.
Register MacroAssembler::tlab_refill(Label& retry,
                                     Label& try_eden,
                                     Label& slow_case) {
  Register top = r0;
  Register t1 = r2;
  Register t2 = r4;
  assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    b(slow_case);
  }

  ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  sub(t1, t1, top);
  lsr(t1, t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
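  //
  // The decision below is, in pseudocode (a sketch only; names abbreviated):
  //
  //   if (free_words <= refill_waste_limit) goto discard_tlab;
  //   refill_waste_limit += refill_waste_limit_increment;  // retain the tlab
  //   goto try_eden;
  //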

  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  cmp(t1, rscratch1);
  br(Assembler::LE, discard_tlab);

  // Retain
  // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  add(rscratch1, rscratch1, t2);
  str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));

  if (TLABStats) {
    // increment number of slow_allocations
    addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
          1, rscratch1);
  }
  b(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
          rscratch1);
    // accumulate wastage -- t1 is amount free in tlab
    addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
          rscratch1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  cbz(top, do_refill);

  // set up the mark word
  mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
  // set the length to the remaining space
  sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
  add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
  strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
  // set klass to intArrayKlass
  {
    unsigned long offset;
    // Dubious reloc: why not an oop reloc?
    adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
         offset);
    ldr(t1, Address(rscratch1, offset));
  }
  // Store klass last; concurrent GCs assume the length is valid if the
  // klass field is not null.
  store_klass(top, t1);

  mov(t1, top);
  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  sub(t1, t1, rscratch1);
  incr_allocated_bytes(rthread, t1, 0, rscratch1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
  lsl(t1, t1, LogHeapWordSize);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = r4;
    assert_different_registers(tsize, rthread, t1);
    str(tsize, Address(pre(sp, -16)));
    ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
    lsl(tsize, tsize, LogHeapWordSize);
    cmp(t1, tsize);
    br(Assembler::EQ, ok);
    STOP("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    ldr(tsize, Address(post(sp, 16)));
  }
#endif
  str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  add(top, top, t1);
  sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
  verify_tlab();
  b(retry);

  return rthread; // for use by caller
}

// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    bind(retry);
    {
      unsigned long offset;
      adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      add(rscratch1, rscratch1, offset);
      ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    cmp(end, obj);
    br(Assembler::LO, slow_case);

    cmp(end, heap_end);
    br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
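    // The LDAXR above and the STLXR below form the usual AArch64
    // load-exclusive/store-exclusive retry loop; conceptually:
    //
    //   do {
    //     obj = load_exclusive(heap_top);            // ldaxr
    //     end = obj + size;                          // checked for wrap/limit
    //   } while (!store_exclusive(heap_top, end));   // stlxr fails => retry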
    stlxr(rscratch2, end, rscratch1);
    cbnzw(rscratch2, retry);
  }
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  mov(rscratch1, os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < StackShadowPages-1; i++) {
    // this could be any sized move but it can be a debugging crumb
    // so the bigger the better.
    lea(tmp, Address(tmp, -os::vm_page_size()));
    str(size, Address(tmp));
  }
}


address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
  unsigned long off;
  adrp(r, Address(page, rtype), off);
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, off));
  return inst_mark();
}

address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, 0));
  return inst_mark();
}

void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  if (uabs(pc() - dest.target()) >= (1LL << 32)) {
    guarantee(rtype == relocInfo::none
              || rtype == relocInfo::external_word_type
              || rtype == relocInfo::poll_type
              || rtype == relocInfo::poll_return_type,
              "can only use a fixed address with an ADRP");
    // Out of range.  This doesn't happen very often, but we have to
    // handle it.
    mov(reg1, dest);
    byte_offset = 0;
  } else {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), dest.rspec());
    byte_offset = (uint64_t)dest.target() & 0xfff;
    _adrp(reg1, dest.target());
  }
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
}


// Search for str1 in str2 and return index or -1
void MacroAssembler::string_indexof(Register str2, Register str1,
                                    Register cnt2, Register cnt1,
                                    Register tmp1, Register tmp2,
                                    Register tmp3, Register tmp4,
                                    int icnt1, Register result) {
  Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;

  Register ch1 = rscratch1;
  Register ch2 = rscratch2;
  Register cnt1tmp = tmp1;
  Register cnt2tmp = tmp2;
  Register cnt1_neg = cnt1;
  Register cnt2_neg = cnt2;
  Register result_tmp = tmp4;

  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;

  // We have two strings, a source string in str2, cnt2 and a pattern string
  // in str1, cnt1.  Find the first occurrence of the pattern in the source
  // or return -1.

  // For larger pattern and source we use a simplified Boyer-Moore algorithm.
  // With a small pattern and source we use linear scan.

  if (icnt1 == -1) {
    cmp(cnt1, 256);             // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
    ccmp(cnt1, 8, 0b0000, LO);  // Can't handle skip >= 256 because we use
    br(LO, LINEARSEARCH);       // a byte array.
    cmp(cnt1, cnt2, LSR, 2);    // Source must be 4 * pattern for BM
    br(HS, LINEARSEARCH);
  }

  // The Boyer-Moore algorithm is based on the description here:-
  //
  // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
  //
  // This describes an algorithm with 2 shift rules: the 'Bad Character' rule
  // and the 'Good Suffix' rule.
  //
  // These rules are essentially heuristics for how far we can shift the
  // pattern along the search string.
  //
  // The implementation here uses the 'Bad Character' rule only because of the
  // complexity of initialisation for the 'Good Suffix' rule.
  //
  // This is also known as the Boyer-Moore-Horspool algorithm:-
  //
  // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
  //
  // #define ASIZE 128
  //
  // int bm(unsigned char *x, int m, unsigned char *y, int n) {
  //   int i, j;
  //   unsigned c;
  //   unsigned char bc[ASIZE];
  //
  //   /* Preprocessing */
  //   for (i = 0; i < ASIZE; ++i)
  //     bc[i] = 0;
  //   for (i = 0; i < m - 1; ) {
  //     c = x[i];
  //     ++i;
  //     if (c < ASIZE) bc[c] = i;
  //   }
  //
  //   /* Searching */
  //   j = 0;
  //   while (j <= n - m) {
  //     c = y[j+m-1];
  //     if (x[m-1] == c)
  //       for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i);
  //     if (i < 0) return j;
  //     if (c < ASIZE)
  //       j = j - bc[y[j+m-1]] + m;
  //     else
  //       j += 1; // Advance by 1 only if char >= ASIZE
  //   }
  // }

  if (icnt1 == -1) {
    BIND(BM);

    Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP;
    Label BMADV, BMMATCH, BMCHECKEND;

    Register cnt1end = tmp2;
    Register str2end = cnt2;
    Register skipch = tmp2;

    // Restrict ASIZE to 128 to reduce stack space/initialisation.
    // The presence of chars >= ASIZE in the target string does not affect
    // performance, but we must be careful not to initialise them in the stack
    // array.
    // The presence of chars >= ASIZE in the source string may adversely affect
    // performance since we can only advance by one when we encounter one.

    stp(zr, zr, pre(sp, -128));
    for (int i = 1; i < 8; i++)
      stp(zr, zr, Address(sp, i*16));

    mov(cnt1tmp, 0);
    sub(cnt1end, cnt1, 1);
    BIND(BCLOOP);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    cmp(ch1, 128);
    add(cnt1tmp, cnt1tmp, 1);
    br(HS, BCSKIP);
    strb(cnt1tmp, Address(sp, ch1));
    BIND(BCSKIP);
    cmp(cnt1tmp, cnt1end);
    br(LT, BCLOOP);

    mov(result_tmp, str2);

    sub(cnt2, cnt2, cnt1);
    add(str2end, str2, cnt2, LSL, 1);
    BIND(BMLOOPSTR2);
    sub(cnt1tmp, cnt1, 1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, skipch);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(LT, BMMATCH);
    BIND(BMLOOPSTR1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, ch2);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(GE, BMLOOPSTR1);
    BIND(BMMATCH);
    sub(result_tmp, str2, result_tmp);
    lsr(result, result_tmp, 1);
    add(sp, sp, 128);
    b(DONE);
    BIND(BMADV);
    add(str2, str2, 2);
    b(BMCHECKEND);
    BIND(BMSKIP);
    cmp(skipch, 128);
    br(HS, BMADV);
    ldrb(ch2, Address(sp, skipch));
    add(str2, str2, cnt1, LSL, 1);
    sub(str2, str2, ch2, LSL, 1);
    BIND(BMCHECKEND);
    cmp(str2, str2end);
    br(LE, BMLOOPSTR2);
    add(sp, sp, 128);
    b(NOMATCH);
  }

  BIND(LINEARSEARCH);
  {
    Label DO1, DO2, DO3;

    Register str2tmp = tmp2;
    Register first = tmp3;

    if (icnt1 == -1)
    {
      Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD;

      cmp(cnt1, 4);
      br(LT, DOSHORT);

      sub(cnt2, cnt2, cnt1);
      sub(cnt1, cnt1, 4);
      mov(result_tmp, cnt2);

      lea(str1, Address(str1, cnt1, Address::uxtw(1)));
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt1_neg, zr, cnt1, LSL, 1);
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      ldr(first, Address(str1, cnt1_neg));

      BIND(FIRST_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      adds(cnt1tmp, cnt1_neg, 8);
      add(cnt2tmp, cnt2_neg, 8);
      br(GE, LAST_WORD);

      BIND(STR1_NEXT);
      ldr(ch1, Address(str1, cnt1tmp));
      ldr(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      adds(cnt1tmp, cnt1tmp, 8);
      add(cnt2tmp, cnt2tmp, 8);
      br(LT, STR1_NEXT);

      BIND(LAST_WORD);
      ldr(ch1, Address(str1));
      sub(str2tmp, str2, cnt1_neg);         // adjust to corresponding
      ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);

      BIND(DOSHORT);
      cmp(cnt1, 2);
      br(LT, DO1);
      br(GT, DO3);
    }

    if (icnt1 == 4) {
      Label CH1_LOOP;

      ldr(ch1, str1);
      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 2) {
      Label CH1_LOOP;

      BIND(DO2);
      ldrw(ch1, str1);
      sub(cnt2, cnt2, 2);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 3) {
      Label FIRST_LOOP, STR2_NEXT, STR1_LOOP;

      BIND(DO3);
      ldrw(first, str1);
      ldrh(ch1, Address(str1, 4));

      sub(cnt2, cnt2, 3);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(FIRST_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmpw(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      add(cnt2tmp, cnt2_neg, 4);
      ldrh(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);
    }

    if (icnt1 == -1 || icnt1 == 1) {
      Label CH1_LOOP, HAS_ZERO;
      Label DO1_SHORT, DO1_LOOP;

      BIND(DO1);
      ldrh(ch1, str1);
      cmp(cnt2, 4);
      br(LT, DO1_SHORT);

      orr(ch1, ch1, ch1, LSL, 16);
      orr(ch1, ch1, ch1, LSL, 32);

      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      mov(tmp3, 0x0001000100010001);
      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      eor(ch2, ch1, ch2);
      sub(tmp1, ch2, tmp3);
      orr(tmp2, ch2, 0x7fff7fff7fff7fff);
      bics(tmp1, tmp1, tmp2);
      br(NE, HAS_ZERO);
      adds(cnt2_neg, cnt2_neg, 8);
      br(LT, CH1_LOOP);

      cmp(cnt2_neg, 8);
      mov(cnt2_neg, 0);
      br(LT, CH1_LOOP);
      b(NOMATCH);

      BIND(HAS_ZERO);
      rev(tmp1, tmp1);
      clz(tmp1, tmp1);
      add(cnt2_neg, cnt2_neg, tmp1, LSR, 3);
      b(MATCH);

      BIND(DO1_SHORT);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      BIND(DO1_LOOP);
      ldrh(ch2, Address(str2, cnt2_neg));
      cmpw(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LT, DO1_LOOP);
    }
  }
  BIND(NOMATCH);
  mov(result, -1);
  b(DONE);
  BIND(MATCH);
  add(result, result_tmp, cnt2_neg, ASR, 1);
  BIND(DONE);
}

// Compare strings.
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    Register tmp1) {
  Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD, DIFFERENCE;

  BLOCK_COMMENT("string_compare {");

  // Compute the minimum of the string lengths and save the difference.
  subsw(tmp1, cnt1, cnt2);
  cselw(cnt2, cnt1, cnt2, Assembler::LE); // min

  // A very short string
  cmpw(cnt2, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, LENGTH_DIFF);

  // Compare longwords
  {
    subw(cnt2, cnt2, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt2, Address::uxtw(1)));
    lea(str2, Address(str2, cnt2, Address::uxtw(1)));
    sub(cnt2, zr, cnt2, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(result, Address(str1, cnt2));
    ldr(cnt1, Address(str2, cnt2));
    adds(cnt2, cnt2, wordSize);
    eor(rscratch2, result, cnt1);
    cbnz(rscratch2, DIFFERENCE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword.  In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(result, Address(str1));
    ldr(cnt1, Address(str2));
    eor(rscratch2, result, cnt1);
    cbz(rscratch2, LENGTH_DIFF);

    // Find the first different characters in the longwords and
    // compute their difference.
    bind(DIFFERENCE);
    rev(rscratch2, rscratch2);
    clz(rscratch2, rscratch2);
    andr(rscratch2, rscratch2, -16);
    lsrv(result, result, rscratch2);
    uxthw(result, result);
    lsrv(cnt1, cnt1, rscratch2);
    uxthw(cnt1, cnt1);
    subw(result, result, cnt1);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the minimum length zero?
  cbz(cnt2, LENGTH_DIFF);

  bind(SHORT_LOOP);
  load_unsigned_short(result, Address(post(str1, 2)));
  load_unsigned_short(cnt1, Address(post(str2, 2)));
  subw(result, result, cnt1);
  cbnz(result, DONE);
  sub(cnt2, cnt2, 1);
  cbnz(cnt2, SHORT_LOOP);

  // Strings are equal up to min length.  Return the length difference.
  bind(LENGTH_DIFF);
  mov(result, tmp1);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_compare");
}


void MacroAssembler::string_equals(Register str1, Register str2,
                                   Register cnt, Register result,
                                   Register tmp1) {
  Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD;

  const Register tmp2 = rscratch1;
  assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);

  BLOCK_COMMENT("string_equals {");

  // Start by assuming that the strings are not equal.
  mov(result, zr);

  // A very short string
  cmpw(cnt, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, SAME_CHARS);

  // Compare longwords
  {
    subw(cnt, cnt, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt, Address::uxtw(1)));
    lea(str2, Address(str2, cnt, Address::uxtw(1)));
    sub(cnt, zr, cnt, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(tmp1, Address(str1, cnt));
    ldr(tmp2, Address(str2, cnt));
    adds(cnt, cnt, wordSize);
    eor(rscratch2, tmp1, tmp2);
    cbnz(rscratch2, DONE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword.  In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(tmp1, Address(str1));
    ldr(tmp2, Address(str2));
    eor(rscratch2, tmp1, tmp2);
    cbz(rscratch2, SAME_CHARS);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the length zero?
  cbz(cnt, SAME_CHARS);

  bind(SHORT_LOOP);
  load_unsigned_short(tmp1, Address(post(str1, 2)));
  load_unsigned_short(tmp2, Address(post(str2, 2)));
  subw(tmp1, tmp1, tmp2);
  cbnz(tmp1, DONE);
  sub(cnt, cnt, 1);
  cbnz(cnt, SHORT_LOOP);

  // Strings are equal.
  bind(SAME_CHARS);
  mov(result, true);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_equals");
}

// Compare char[] arrays aligned to 4 bytes
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                        Register result, Register tmp1)
{
  Register cnt1 = rscratch1;
  Register cnt2 = rscratch2;
  Register tmp2 = rscratch2;

  Label SAME, DIFFER, NEXT, TAIL03, TAIL01;

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  BLOCK_COMMENT("char_arrays_equals {");

  // different until proven equal
  mov(result, false);

  // same array?
  cmp(ary1, ary2);
  br(Assembler::EQ, SAME);

  // ne if either null
  cbz(ary1, DIFFER);
  cbz(ary2, DIFFER);

  // lengths ne?
  ldrw(cnt1, Address(ary1, length_offset));
  ldrw(cnt2, Address(ary2, length_offset));
  cmp(cnt1, cnt2);
  br(Assembler::NE, DIFFER);

  lea(ary1, Address(ary1, base_offset));
  lea(ary2, Address(ary2, base_offset));

  subs(cnt1, cnt1, 4);
  br(LT, TAIL03);

  BIND(NEXT);
  ldr(tmp1, Address(post(ary1, 8)));
  ldr(tmp2, Address(post(ary2, 8)));
  subs(cnt1, cnt1, 4);
  eor(tmp1, tmp1, tmp2);
  cbnz(tmp1, DIFFER);
  br(GE, NEXT);

  BIND(TAIL03); // 0-3 chars left, cnt1 = #chars left - 4
  tst(cnt1, 0b10);
  br(EQ, TAIL01);
  ldrw(tmp1, Address(post(ary1, 4)));
  ldrw(tmp2, Address(post(ary2, 4)));
  cmp(tmp1, tmp2);
  br(NE, DIFFER);
  BIND(TAIL01); // 0-1 chars left
  tst(cnt1, 0b01);
  br(EQ, SAME);
  ldrh(tmp1, ary1);
  ldrh(tmp2, ary2);
  cmp(tmp1, tmp2);
  br(NE, DIFFER);

  BIND(SAME);
  mov(result, true);
  BIND(DIFFER); // result already set

  BLOCK_COMMENT("} char_arrays_equals");
}

// encode char[] to byte[] in ISO_8859_1
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register result,
                                      FloatRegister Vtmp1, FloatRegister Vtmp2,
                                      FloatRegister Vtmp3, FloatRegister Vtmp4)
{
  Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1;
  Register tmp1 = rscratch1;

  mov(result, len); // Save initial len

#ifndef BUILTIN_SIM
  subs(len, len, 32);
  br(LT, LOOP_8);

  // The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions
  // to convert chars to bytes.  These set the 'QC' bit in the FPSR if
  // any char could not fit in a byte, so clear the FPSR so we can test it.
  clear_fpsr();

  BIND(NEXT_32);
  ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);  // uqxtn  - write bottom half
  uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half
  uqxtn(Vtmp2, T8B, Vtmp3, T8H);
  uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_8);
  st1(Vtmp1, Vtmp2, T16B, post(dst, 32));
  subs(len, len, 32);
  add(src, src, 64);
  br(GE, NEXT_32);

  BIND(LOOP_8);
  adds(len, len, 32-8);
  br(LT, LOOP_1);
  clear_fpsr(); // QC may be set from loop above, clear again
  BIND(NEXT_8);
  ld1(Vtmp1, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_1);
  st1(Vtmp1, T8B, post(dst, 8));
  subs(len, len, 8);
  add(src, src, 16);
  br(GE, NEXT_8);

  BIND(LOOP_1);
  adds(len, len, 8);
  br(LE, DONE);
#else
  cbz(len, DONE);
#endif
  BIND(NEXT_1);
  ldrh(tmp1, Address(post(src, 2)));
  tst(tmp1, 0xff00);
  br(NE, DONE);
  strb(tmp1, Address(post(dst, 1)));
  subs(len, len, 1);
  br(GT, NEXT_1);

  BIND(DONE);
  sub(result, result, len); // Return index where we stopped
}
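
// A scalar C sketch of what encode_iso_array computes (illustrative only;
// the SIMD path above is just a faster version of this loop):
//
//   int encode_iso(const jchar *src, jbyte *dst, int len) {
//     int i;
//     for (i = 0; i < len; i++) {
//       jchar c = src[i];
//       if (c & 0xff00) break;      // not ISO-8859-1, stop here
//       dst[i] = (jbyte)c;
//     }
//     return i;                     // index where we stopped
//   }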