/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target - branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 3 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 2 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str or add instruction. Otherwise we could accidentally end
      // up treating a type 3 relocation as a type 1 or 2 just because it happened
      // to be followed by a random unrelated ldr/str or add instruction.
      //
      // In the case of a type 3 relocation, we know that these are only generated
      // for the safepoint polling page, or for the card table byte map base, so we
      // assert as much and of course that the offset is 0.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else {
        assert((jbyte *)target ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               target == StubRoutines::crc_table_addr() ||
               (address)target == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}
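// Worked example of the adrp page arithmetic above (illustrative values,
// not taken from the sources): with the adrp at PC 0x40001234 and a
// target of 0x40025678, pc_page == 0x40001 and adr_page == 0x40025, so
// the patched adrp immediate is 0x24 pages and the trailing ldr/str or
// add supplies the low 12 bits, #0x678.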
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}
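// Worked example (illustrative): a narrow OOP n == 0x12345678 is patched
// above into the two-instruction form as
//   movz Rd, #0x1234, lsl #16   // bits 20:5 <- n >> 16
//   movk Rd, #0x5678            // bits 20:5 <- n & 0xffff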
address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only. The only cases where this is generated are
      // the safepoint polling page and the card table byte map base, so
      // we assert as much.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        assert((jbyte *)target_page ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               (address)target_page == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}
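// Worked example (illustrative): given the move-wide sequence
//   movz x8, #0x789c
//   movk x8, #0x3456, lsl #16
//   movk x8, #0x7f12, lsl #32
// the decode above returns 0x789c + (0x3456 << 16) + (0x7f12 << 32),
// i.e. 0x7f123456789c.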
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}


void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  if (clear_pc) {
    str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
  }
}

// Calls to C land
//
// When entering C land, the rfp & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct. We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}
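// For reference, the far form of the call above expands to (illustrative):
//   adrp tmp, <target page> ; add tmp, tmp, #<page offset> ; blr tmp
// whereas the near form is a single "bl <target>", which only reaches
// +/-128MB from the call site.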
void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far jump not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}
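// A sketch of the mark word layout that the biased locking code below
// relies on (see markOop.hpp for the authoritative definition):
//   unlocked object: [ hash ... | age:4 | biased_lock:1 (0) | lock:2 (01) ]
//   biased object:   [ thread:54 | epoch:2 | age:4 | biased_lock:1 (1) | lock:2 (01) ]
// so biased_lock_pattern is 0b101 in the low three bits.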
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    tmp_reg = rscratch2;
  }
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.

void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
  if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
    emit_trampoline_stub(offset(), entry.target());
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (Assembler::reachable_from_branch_at(pc(), entry.target())) {
    bl(entry.target());
  } else {
    bl(pc());
  }
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

void MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                          address dest) {
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    start_a_stub(Compile::MAX_stubs_size/2);
    Compile::current()->env()->record_out_of_memory_failure();
    return;
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
}
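// The stub emitted above is three words (illustrative encoding):
//   ldr rscratch1, +8   // load the 64-bit destination stored just below
//   br  rscratch1
//   .quad <dest>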
void MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}
"smashed arg"); 827 pass_arg2(this, arg_2); 828 pass_arg1(this, arg_1); 829 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 830 } 831 832 833 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 834 ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 835 str(zr, Address(java_thread, JavaThread::vm_result_offset())); 836 verify_oop(oop_result, "broken oop in call_VM_base"); 837 } 838 839 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 840 ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 841 str(zr, Address(java_thread, JavaThread::vm_result_2_offset())); 842 } 843 844 void MacroAssembler::align(int modulus) { 845 while (offset() % modulus != 0) nop(); 846 } 847 848 // these are no-ops overridden by InterpreterMacroAssembler 849 850 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { } 851 852 void MacroAssembler::check_and_handle_popframe(Register java_thread) { } 853 854 855 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 856 Register tmp, 857 int offset) { 858 intptr_t value = *delayed_value_addr; 859 if (value != 0) 860 return RegisterOrConstant(value + offset); 861 862 // load indirectly to solve generation ordering problem 863 ldr(tmp, ExternalAddress((address) delayed_value_addr)); 864 865 if (offset != 0) 866 add(tmp, tmp, offset); 867 868 return RegisterOrConstant(tmp); 869 } 870 871 872 void MacroAssembler:: notify(int type) { 873 if (type == bytecode_start) { 874 // set_last_Java_frame(esp, rfp, (address)NULL); 875 Assembler:: notify(type); 876 // reset_last_Java_frame(true, false); 877 } 878 else 879 Assembler:: notify(type); 880 } 881 882 // Look up the method for a megamorphic invokeinterface call. 883 // The target method is determined by <intf_klass, itable_index>. 884 // The receiver klass is in recv_klass. 885 // On success, the result will be in method_result, and execution falls through. 886 // On failure, execution transfers to the given label. 887 void MacroAssembler::lookup_interface_method(Register recv_klass, 888 Register intf_klass, 889 RegisterOrConstant itable_index, 890 Register method_result, 891 Register scan_temp, 892 Label& L_no_such_interface) { 893 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 894 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 895 "caller must use same register for non-constant itable index as for method"); 896 897 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 898 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 899 int itentry_off = itableMethodEntry::method_offset_in_bytes(); 900 int scan_step = itableOffsetEntry::size() * wordSize; 901 int vte_size = vtableEntry::size() * wordSize; 902 assert(vte_size == wordSize, "else adjust times_vte_scale"); 903 904 ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize)); 905 906 // %%% Could store the aligned, prescaled offset in the klassoop. 907 // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 908 lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3))); 909 add(scan_temp, scan_temp, vtable_base); 910 if (HeapWordsPerLong > 1) { 911 // Round up to align_object_offset boundary 912 // see code for instanceKlass::start_of_itable! 
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
  if (itentry_off)
    add(recv_klass, recv_klass, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  ldr(method_result, Address(recv_klass, scan_temp));
}
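// Sketch of the itable layout scanned above (summarized, not verbatim
// from the InstanceKlass itable code): after the embedded vtable,
//   itableOffsetEntry[0] { interface klass, offset }   <- scan_temp starts here
//   itableOffsetEntry[1] { interface klass, offset }
//   ...
//   itableOffsetEntry[n] { NULL }                      <- terminates the scan
// and each entry's offset locates that interface's itableMethodEntry
// array, which is indexed by itable_index.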
// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result, Address(recv_klass, vtable_offset_in_bytes));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  // Get super_klass value into r0 (even if it was in r5 or r2).
  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop_addr");
}
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr);
}

void MacroAssembler::call_VM_leaf_base1(address entry_point,
                                        int number_of_gp_arguments,
                                        int number_of_fp_arguments,
                                        ret_type type,
                                        Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  // We add 1 to number_of_arguments because the thread in arg0 is
  // not counted
  mov(rscratch1, entry_point);
  blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
  maybe_isb();
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert(arg_0 != c_rarg3, "smashed arg");
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    // NOTE: this is plenty to provoke a segv
    ldr(zr, Address(reg));
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// MacroAssembler protected routines needed to implement
// public methods

void MacroAssembler::mov(Register r, Address dest) {
  code_section()->relocate(pc(), dest.rspec());
  u_int64_t imm64 = (u_int64_t)dest.target();
  movptr(r, imm64);
}

// Move a constant pointer into r. In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
}

// Macro to mov replicated immediate to vector register.
// Vd will get the following values for different arrangements in T
//   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
//   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
//   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
//   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
//   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
//   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
//   T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
  assert(T != T1D && T != T2D, "invalid arrangement");
  if (T == T8B || T == T16B) {
    assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
    movi(Vd, T, imm32 & 0xff, 0);
    return;
  }
  u_int32_t nimm32 = ~imm32;
  if (T == T4H || T == T8H) {
    assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
    imm32 &= 0xffff;
    nimm32 &= 0xffff;
  }
  u_int32_t x = imm32;
  int movi_cnt = 0;
  int movn_cnt = 0;
  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
  x = nimm32;
  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
  if (movn_cnt < movi_cnt) imm32 = nimm32;
  unsigned lsl = 0;
  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
  if (movn_cnt < movi_cnt)
    mvni(Vd, T, imm32 & 0xff, lsl);
  else
    movi(Vd, T, imm32 & 0xff, lsl);
  imm32 >>= 8; lsl += 8;
  while (imm32) {
    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
    if (movn_cnt < movi_cnt)
      bici(Vd, T, imm32 & 0xff, lsl);
    else
      orri(Vd, T, imm32 & 0xff, lsl);
    lsl += 8; imm32 >>= 8;
  }
}
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(false, imm64)) {
    orr(dst, zr, imm64);
  } else {
    // we can use a combination of MOVZ or MOVN with
    // MOVK to build up the constant
    u_int64_t imm_h[4];
    int zero_count = 0;
    int neg_count = 0;
    int i;
    for (i = 0; i < 4; i++) {
      imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
      if (imm_h[i] == 0) {
        zero_count++;
      } else if (imm_h[i] == 0xffffL) {
        neg_count++;
      }
    }
    if (zero_count == 4) {
      // one MOVZ will do
      movz(dst, 0);
    } else if (neg_count == 4) {
      // one MOVN will do
      movn(dst, 0);
    } else if (zero_count == 3) {
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          break;
        }
      }
    } else if (neg_count == 3) {
      // one MOVN will do
      for (int i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          break;
        }
      }
    } else if (zero_count == 2) {
      // one MOVZ and one MOVK will do
      for (i = 0; i < 3; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 2) {
      // one MOVN and one MOVK will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (zero_count == 1) {
      // one MOVZ and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0x0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 1) {
      // one MOVN and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else {
      // use a MOVZ and 3 MOVKs (makes it easier to debug)
      movz(dst, (u_int32_t)imm_h[0], 0);
      for (i = 1; i < 4; i++) {
        movk(dst, (u_int32_t)imm_h[i], (i << 4));
      }
    }
  }
}
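// Worked examples (illustrative): for imm64 == 0x00000000dead0000 three
// halfwords are zero, so a single "movz dst, #0xdead, lsl #16" suffices;
// for imm64 == 0xffffffff1234ffff three halfwords are 0xffff, so a single
// "movn dst, #(0x1234 ^ 0xffff), lsl #16" suffices.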
void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(true, imm32)) {
    orrw(dst, zr, imm32);
  } else {
    // we can use MOVZ, MOVN or two calls to MOVK to build up the
    // constant
    u_int32_t imm_h[2];
    imm_h[0] = imm32 & 0xffff;
    imm_h[1] = ((imm32 >> 16) & 0xffff);
    if (imm_h[0] == 0) {
      movzw(dst, imm_h[1], 16);
    } else if (imm_h[0] == 0xffff) {
      movnw(dst, imm_h[1] ^ 0xffff, 16);
    } else if (imm_h[1] == 0) {
      movzw(dst, imm_h[0], 0);
    } else if (imm_h[1] == 0xffff) {
      movnw(dst, imm_h[0] ^ 0xffff, 0);
    } else {
      // use a MOVZ and MOVK (makes it easier to debug)
      movzw(dst, imm_h[0], 0);
      movkw(dst, imm_h[1], 16);
    }
  }
}

// Form an address from base + offset in Rd. Rd may or may
// not actually be used: you must use the Address that is returned.
// It is up to you to ensure that the shift provided matches the size
// of your data.
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
  if (Address::offset_ok_for_immed(byte_offset, shift))
    // It fits; no need for any heroics
    return Address(base, byte_offset);

  // Don't do anything clever with negative or misaligned offsets
  unsigned mask = (1 << shift) - 1;
  if (byte_offset < 0 || byte_offset & mask) {
    mov(Rd, byte_offset);
    add(Rd, base, Rd);
    return Address(Rd);
  }

  // See if we can do this with two 12-bit offsets
  {
    unsigned long word_offset = byte_offset >> shift;
    unsigned long masked_offset = word_offset & 0xfff000;
    if (Address::offset_ok_for_immed(word_offset - masked_offset)
        && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
      add(Rd, base, masked_offset << shift);
      word_offset -= masked_offset;
      return Address(Rd, word_offset << shift);
    }
  }

  // Do it the hard way
  mov(Rd, byte_offset);
  add(Rd, base, Rd);
  return Address(Rd);
}
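// Worked example (illustrative): base x1, byte_offset == 0x100008,
// shift == 3. The scaled 12-bit immediate does not fit, so the code
// above splits word_offset == 0x20001 into a masked part 0x20000 and a
// residue 1, emits "add Rd, x1, #0x100000" and returns Address(Rd, 8).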
offset in Rd.  Rd may or may
// not actually be used: you must use the Address that is returned.
// It is up to you to ensure that the shift provided matches the size
// of your data.
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
  if (Address::offset_ok_for_immed(byte_offset, shift))
    // It fits; no need for any heroics
    return Address(base, byte_offset);

  // Don't do anything clever with negative or misaligned offsets
  unsigned mask = (1 << shift) - 1;
  if (byte_offset < 0 || byte_offset & mask) {
    mov(Rd, byte_offset);
    add(Rd, base, Rd);
    return Address(Rd);
  }

  // See if we can do this with two 12-bit offsets
  {
    unsigned long word_offset = byte_offset >> shift;
    unsigned long masked_offset = word_offset & 0xfff000;
    if (Address::offset_ok_for_immed(word_offset - masked_offset)
        && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
      add(Rd, base, masked_offset << shift);
      word_offset -= masked_offset;
      return Address(Rd, word_offset << shift);
    }
  }

  // Do it the hard way
  mov(Rd, byte_offset);
  add(Rd, base, Rd);
  return Address(Rd);
}

void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) {
  Label retry_load;
  bind(retry_load);
  // flush and load exclusive from the memory location
  ldxrw(tmp, counter_addr);
  addw(tmp, tmp, 1);
  // if we store+flush with no intervening write tmp will be zero
  stxrw(tmp, tmp, counter_addr);
  cbnzw(tmp, retry_load);
}


int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java idiv and irem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivl_offset = offset();
  if (! want_remainder) {
    sdivw(result, ra, rb);
  } else {
    sdivw(scratch, ra, rb);
    Assembler::msubw(result, scratch, rb, ra);
  }

  return idivl_offset;
}

int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java ldiv and lrem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivq_offset = offset();
  if (!
want_remainder) { 1701 sdiv(result, ra, rb); 1702 } else { 1703 sdiv(scratch, ra, rb); 1704 Assembler::msub(result, scratch, rb, ra); 1705 } 1706 1707 return idivq_offset; 1708 } 1709 1710 // MacroAssembler routines found actually to be needed 1711 1712 void MacroAssembler::push(Register src) 1713 { 1714 str(src, Address(pre(esp, -1 * wordSize))); 1715 } 1716 1717 void MacroAssembler::pop(Register dst) 1718 { 1719 ldr(dst, Address(post(esp, 1 * wordSize))); 1720 } 1721 1722 // Note: load_unsigned_short used to be called load_unsigned_word. 1723 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1724 int off = offset(); 1725 ldrh(dst, src); 1726 return off; 1727 } 1728 1729 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1730 int off = offset(); 1731 ldrb(dst, src); 1732 return off; 1733 } 1734 1735 int MacroAssembler::load_signed_short(Register dst, Address src) { 1736 int off = offset(); 1737 ldrsh(dst, src); 1738 return off; 1739 } 1740 1741 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1742 int off = offset(); 1743 ldrsb(dst, src); 1744 return off; 1745 } 1746 1747 int MacroAssembler::load_signed_short32(Register dst, Address src) { 1748 int off = offset(); 1749 ldrshw(dst, src); 1750 return off; 1751 } 1752 1753 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 1754 int off = offset(); 1755 ldrsbw(dst, src); 1756 return off; 1757 } 1758 1759 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1760 switch (size_in_bytes) { 1761 case 8: ldr(dst, src); break; 1762 case 4: ldrw(dst, src); break; 1763 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1764 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1765 default: ShouldNotReachHere(); 1766 } 1767 } 1768 1769 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1770 switch (size_in_bytes) { 1771 case 8: str(src, dst); break; 1772 case 4: strw(src, dst); break; 1773 case 2: strh(src, dst); break; 1774 case 1: strb(src, dst); break; 1775 default: ShouldNotReachHere(); 1776 } 1777 } 1778 1779 void MacroAssembler::decrementw(Register reg, int value) 1780 { 1781 if (value < 0) { incrementw(reg, -value); return; } 1782 if (value == 0) { return; } 1783 if (value < (1 << 12)) { subw(reg, reg, value); return; } 1784 /* else */ { 1785 guarantee(reg != rscratch2, "invalid dst for register decrement"); 1786 movw(rscratch2, (unsigned)value); 1787 subw(reg, reg, rscratch2); 1788 } 1789 } 1790 1791 void MacroAssembler::decrement(Register reg, int value) 1792 { 1793 if (value < 0) { increment(reg, -value); return; } 1794 if (value == 0) { return; } 1795 if (value < (1 << 12)) { sub(reg, reg, value); return; } 1796 /* else */ { 1797 assert(reg != rscratch2, "invalid dst for register decrement"); 1798 mov(rscratch2, (unsigned long)value); 1799 sub(reg, reg, rscratch2); 1800 } 1801 } 1802 1803 void MacroAssembler::decrementw(Address dst, int value) 1804 { 1805 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 1806 ldrw(rscratch1, dst); 1807 decrementw(rscratch1, value); 1808 strw(rscratch1, dst); 1809 } 1810 1811 void MacroAssembler::decrement(Address dst, int value) 1812 { 1813 assert(!dst.uses(rscratch1), "invalid address for decrement"); 1814 ldr(rscratch1, dst); 1815 decrement(rscratch1, value); 1816 str(rscratch1, dst); 1817 } 1818 1819 void MacroAssembler::incrementw(Register reg, int value) 1820 { 1821 if (value < 0) { decrementw(reg, -value); return; } 1822 if (value == 0) { return; } 1823 if (value < (1 << 12)) { addw(reg, reg, value); return; } 1824 /* else */ { 1825 assert(reg != rscratch2, "invalid dst for register increment"); 1826 movw(rscratch2, (unsigned)value); 1827 addw(reg, reg, rscratch2); 1828 } 1829 } 1830 1831 void MacroAssembler::increment(Register reg, int value) 1832 { 1833 if (value < 0) { decrement(reg, -value); return; } 1834 if (value == 0) { return; } 1835 if (value < (1 << 12)) { add(reg, reg, value); return; } 1836 /* else */ { 1837 assert(reg != rscratch2, "invalid dst for register increment"); 1838 movw(rscratch2, (unsigned)value); 1839 add(reg, reg, rscratch2); 1840 } 1841 } 1842 1843 void MacroAssembler::incrementw(Address dst, int value) 1844 { 1845 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1846 ldrw(rscratch1, dst); 1847 incrementw(rscratch1, value); 1848 strw(rscratch1, dst); 1849 } 1850 1851 void MacroAssembler::increment(Address dst, int value) 1852 { 1853 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1854 ldr(rscratch1, dst); 1855 increment(rscratch1, value); 1856 str(rscratch1, dst); 1857 } 1858 1859 1860 void MacroAssembler::pusha() { 1861 push(0x7fffffff, sp); 1862 } 1863 1864 void MacroAssembler::popa() { 1865 pop(0x7fffffff, sp); 1866 } 1867 1868 // Push lots of registers in the bit set supplied. Don't push sp. 
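// e.g. (illustrative) a bitset of 0b111 (r0..r2) becomes the pairs
// (r0,r1) and (r2,zr): zr is appended as padding and the count is
// truncated to an even number, so the stack stays 16-byte aligned.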
// Return the number of words pushed
int MacroAssembler::push(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;  // Only push an even number of regs

  if (count) {
    stp(as_Register(regs[0]), as_Register(regs[1]),
        Address(pre(stack, -count * wordSize)));
    words_pushed += 2;
  }
  for (int i = 2; i < count; i += 2) {
    stp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}

int MacroAssembler::pop(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;

  for (int i = 2; i < count; i += 2) {
    ldp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_pushed += 2;
  }
  if (count) {
    ldp(as_Register(regs[0]), as_Register(regs[1]),
        Address(post(stack, count * wordSize)));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
  assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
    cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
    br(Assembler::EQ, ok);
    stop(msg);
    bind(ok);
    pop(1 << rscratch1->encoding(), sp);
  }
#endif
}
#endif

void MacroAssembler::stop(const char* msg) {
  address ip = pc();
  pusha();
  mov(c_rarg0, (address)msg);
  mov(c_rarg1, (address)ip);
  mov(c_rarg2, sp);
  mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
  // call(c_rarg3);
  blrt(c_rarg3, 3, 0, 1);
  hlt(0);
}

// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                                           add_sub_imm_insn insn1,
                                           add_sub_reg_insn insn2) {
  assert(Rd != zr, "Rd = zr and not setting flags?");
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    if (uabs(imm) < (1 << 24)) {
      (this->*insn1)(Rd, Rn, imm & -(1 << 12));
      (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
    } else {
      assert_different_registers(Rd, Rn);
      mov(Rd, (uint64_t)imm);
      (this->*insn2)(Rd, Rn, Rd, LSL, 0);
    }
  }
}

// Separate version which sets the flags. Optimisations are more restricted
// because we must set the flags correctly.
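// For example (illustrative): an adds/subs whose immediate is out of
// range cannot be split into two flag-setting halves the way
// wrap_add_sub_imm_insn splits a plain add/sub, because the flags after
// the first half would describe only a partial result. The constant is
// instead materialized into Rd and a single flag-setting
// register-register form is issued.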
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                                             add_sub_imm_insn insn1,
                                             add_sub_reg_insn insn2) {
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    assert_different_registers(Rd, Rn);
    assert(Rd != zr, "overflow in immediate operand");
    mov(Rd, (uint64_t)imm);
    (this->*insn2)(Rd, Rn, Rd, LSL, 0);
  }
}


void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    add(Rd, Rn, increment.as_register());
  } else {
    add(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    addw(Rd, Rn, increment.as_register());
  } else {
    addw(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, Universe::narrow_ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// register+offset Address.

void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                                Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  // tmp returns 0/1 for success/failure
  Label retry_load, nope;

  bind(retry_load);
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  ldaxr(tmp, addr);
  cmp(tmp, oldv);
  br(Assembler::NE, nope);
  // if we store+flush with no intervening write tmp will be zero
  stlxr(tmp, newv, addr);
  cbzw(tmp, succeed);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
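  // (stlxr writes 0 to its status register on success and 1 if the
  //  exclusive monitor was lost, so falling through to the branch below
  //  means another write intervened and we must loop.)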
  b(retry_load);
  // if the memory word differs we return it in oldv and signal a fail
  bind(nope);
  membar(AnyAny);
  mov(oldv, tmp);
  if (fail)
    b(*fail);
}

void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                              Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  // tmp returns 0/1 for success/failure
  Label retry_load, nope;

  bind(retry_load);
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  ldaxrw(tmp, addr);
  cmp(tmp, oldv);
  br(Assembler::NE, nope);
  // if we store+flush with no intervening write tmp will be zero
  stlxrw(tmp, newv, addr);
  cbzw(tmp, succeed);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
  b(retry_load);
  // if the memory word differs we return it in oldv and signal a fail
  bind(nope);
  membar(AnyAny);
  mov(oldv, tmp);
  if (fail)
    b(*fail);
}

static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(LDXR, OP, STXR)                                       \
void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch1, rscratch1, addr);                                     \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_OP(ldxr, add, stxr)
ATOMIC_OP(ldxrw, addw, stxrw)

#undef ATOMIC_OP
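// e.g. (illustrative) ATOMIC_OP(ldxrw, addw, stxrw) above defines
// atomic_addw(prev, incr, addr): a 32-bit ldxrw/addw/stxrw retry loop
// that stores old + incr and, when prev is a valid register, leaves the
// old value in prev.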
#define ATOMIC_XCHG(OP, LDXR, STXR)                                     \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, ldxr, stxr)
ATOMIC_XCHG(xchgw, ldxrw, stxrw)

#undef ATOMIC_XCHG

void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
    thread = rthread;
  }
  assert(t1->is_valid(), "need temp reg");

  ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    add(t1, t1, var_size_in_bytes);
  } else {
    add(t1, t1, con_size_in_bytes);
  }
  str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016lx", regs[0]);
      tty->print_cr(" r1 = 0x%016lx", regs[1]);
      tty->print_cr(" r2 = 0x%016lx", regs[2]);
      tty->print_cr(" r3 = 0x%016lx", regs[3]);
      tty->print_cr(" r4 = 0x%016lx", regs[4]);
      tty->print_cr(" r5 = 0x%016lx", regs[5]);
      tty->print_cr(" r6 = 0x%016lx", regs[6]);
      tty->print_cr(" r7 = 0x%016lx", regs[7]);
      tty->print_cr(" r8 = 0x%016lx", regs[8]);
      tty->print_cr(" r9 = 0x%016lx", regs[9]);
      tty->print_cr("r10 = 0x%016lx", regs[10]);
      tty->print_cr("r11 = 0x%016lx", regs[11]);
      tty->print_cr("r12 = 0x%016lx", regs[12]);
      tty->print_cr("r13 = 0x%016lx", regs[13]);
      tty->print_cr("r14 = 0x%016lx", regs[14]);
      tty->print_cr("r15 = 0x%016lx", regs[15]);
      tty->print_cr("r16 = 0x%016lx", regs[16]);
      tty->print_cr("r17 = 0x%016lx", regs[17]);
      tty->print_cr("r18 = 0x%016lx", regs[18]);
      tty->print_cr("r19 = 0x%016lx", regs[19]);
      tty->print_cr("r20 = 0x%016lx", regs[20]);
      tty->print_cr("r21 = 0x%016lx", regs[21]);
      tty->print_cr("r22 = 0x%016lx", regs[22]);
      tty->print_cr("r23 = 0x%016lx", regs[23]);
      tty->print_cr("r24 = 0x%016lx", regs[24]);
      tty->print_cr("r25 = 0x%016lx", regs[25]);
      tty->print_cr("r26 = 0x%016lx", regs[26]);
      tty->print_cr("r27 = 0x%016lx", regs[27]);
      tty->print_cr("r28 = 0x%016lx", regs[28]);
      tty->print_cr("r30 = 0x%016lx", regs[30]);
      tty->print_cr("r31 = 0x%016lx", regs[31]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  }
}

#ifdef BUILTIN_SIM
// routine to generate an x86 prolog for a stub function which
// bootstraps into the generated ARM code which directly follows the
// stub
//
// the argument encodes the number of general and fp registers
// passed by the caller and the calling convention (currently just
// the number of general registers and assumes C argument passing)

extern "C" {
int aarch64_stub_prolog_size();
void aarch64_stub_prolog();
void aarch64_prolog();
}

void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type,
                                   address *prolog_ptr)
{
  int calltype = (((ret_type & 0x3) << 8) |
                  ((fp_arg_count & 0xf) << 4) |
                  (gp_arg_count & 0xf));

  // the addresses for the x86 to ARM entry code we need to use
  address start = pc();
  // printf("start = %lx\n", start);
  int byteCount = aarch64_stub_prolog_size();
  // printf("byteCount = %x\n", byteCount);
  int instructionCount = (byteCount + 3) / 4;
  // printf("instructionCount = %x\n", instructionCount);
  for (int i = 0; i < instructionCount; i++) {
    nop();
  }

  memcpy(start, (void*)aarch64_stub_prolog, byteCount);

  // write the address of the setup routine and the call format at the
  // end of the copied code
  u_int64_t *patch_end = (u_int64_t *)(start + byteCount);
  if (prolog_ptr)
    patch_end[-2] = (u_int64_t)prolog_ptr;
  patch_end[-1] = calltype;
}
#endif

void MacroAssembler::push_CPU_state() {
  push(0x3fffffff, sp);         // integer registers except lr & sp

  for (int i = 30; i >= 0; i -= 2)
    stpd(as_FloatRegister(i), as_FloatRegister(i+1),
         Address(pre(sp, -2 * wordSize)));
}

void MacroAssembler::pop_CPU_state() {
  for (int i = 0; i < 32; i += 2)
    ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
         Address(post(sp, 2 * wordSize)));

  pop(0x3fffffff, sp);          // integer registers except lr & sp
}

/**
 * Helpers for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(rscratch2, base, offset & ((1<<12)-1));
    base = rscratch2;
    offset &= ~((1<<12)-1);
  }

  if (offset >= (1<<12) * size) {
    add(rscratch2, base, offset & (((1<<12)-1)<<12));
    base = rscratch2;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}
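// e.g. (illustrative) a spill at byte offset 0x9c40 with size == 8 does
// not fit the 12-bit scaled immediate (0x9c40 >> 3 > 4095), so it is
// split into
//   add rscratch2, sp, #0x9000
// and the returned Address(rscratch2, 0xc40).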
/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
2395 * 2396 */ 2397 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 2398 Register carry, Register carry2, 2399 Register idx, Register jdx, 2400 Register yz_idx1, Register yz_idx2, 2401 Register tmp, Register tmp3, Register tmp4, 2402 Register tmp6, Register product_hi) { 2403 2404 // jlong carry, x[], y[], z[]; 2405 // int kdx = ystart+1; 2406 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 2407 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 2408 // jlong carry2 = (jlong)(tmp3 >>> 64); 2409 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 2410 // carry = (jlong)(tmp4 >>> 64); 2411 // z[kdx+idx+1] = (jlong)tmp3; 2412 // z[kdx+idx] = (jlong)tmp4; 2413 // } 2414 // idx += 2; 2415 // if (idx > 0) { 2416 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 2417 // z[kdx+idx] = (jlong)yz_idx1; 2418 // carry = (jlong)(yz_idx1 >>> 64); 2419 // } 2420 // 2421 2422 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 2423 2424 lsrw(jdx, idx, 2); 2425 2426 bind(L_third_loop); 2427 2428 subsw(jdx, jdx, 1); 2429 br(Assembler::MI, L_third_loop_exit); 2430 subw(idx, idx, 4); 2431 2432 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2433 2434 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 2435 2436 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2437 2438 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 2439 ror(yz_idx2, yz_idx2, 32); 2440 2441 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 2442 2443 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2444 umulh(tmp4, product_hi, yz_idx1); 2445 2446 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 2447 ror(rscratch2, rscratch2, 32); 2448 2449 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 2450 umulh(carry2, product_hi, yz_idx2); 2451 2452 // propagate sum of both multiplications into carry:tmp4:tmp3 2453 adds(tmp3, tmp3, carry); 2454 adc(tmp4, tmp4, zr); 2455 adds(tmp3, tmp3, rscratch1); 2456 adcs(tmp4, tmp4, tmp); 2457 adc(carry, carry2, zr); 2458 adds(tmp4, tmp4, rscratch2); 2459 adc(carry, carry, zr); 2460 2461 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 2462 ror(tmp4, tmp4, 32); 2463 stp(tmp4, tmp3, Address(tmp6, 0)); 2464 2465 b(L_third_loop); 2466 bind (L_third_loop_exit); 2467 2468 andw (idx, idx, 0x3); 2469 cbz(idx, L_post_third_loop_done); 2470 2471 Label L_check_1; 2472 subsw(idx, idx, 2); 2473 br(Assembler::MI, L_check_1); 2474 2475 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2476 ldr(yz_idx1, Address(rscratch1, 0)); 2477 ror(yz_idx1, yz_idx1, 32); 2478 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2479 umulh(tmp4, product_hi, yz_idx1); 2480 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2481 ldr(yz_idx2, Address(rscratch1, 0)); 2482 ror(yz_idx2, yz_idx2, 32); 2483 2484 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 2485 2486 ror(tmp3, tmp3, 32); 2487 str(tmp3, Address(rscratch1, 0)); 2488 2489 bind (L_check_1); 2490 2491 andw (idx, idx, 0x1); 2492 subsw(idx, idx, 1); 2493 br(Assembler::MI, L_post_third_loop_done); 2494 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2495 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 2496 umulh(carry2, tmp4, product_hi); 2497 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2498 2499 add2_with_carry(carry2, tmp3, tmp4, carry); 2500 2501 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2502 
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: zlen
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);      // idx = ylen;
  movw(kdx, zlen);      // kdx = xlen+ylen;
  mov(carry, zr);       // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
2582 // 2583 // for (int i = xstart-1; i >= 0; i--) { // Second loop 2584 // carry = 0; 2585 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 2586 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 2587 // (z[k] & LONG_MASK) + carry; 2588 // z[k] = (int)product; 2589 // carry = product >>> 32; 2590 // } 2591 // z[i] = (int)carry; 2592 // } 2593 // 2594 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 2595 2596 const Register jdx = tmp1; 2597 2598 bind(L_second_loop); 2599 mov(carry, zr); // carry = 0; 2600 movw(jdx, ylen); // j = ystart+1 2601 2602 subsw(xstart, xstart, 1); // i = xstart-1; 2603 br(Assembler::MI, L_done); 2604 2605 str(z, Address(pre(sp, -4 * wordSize))); 2606 2607 Label L_last_x; 2608 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 2609 subsw(xstart, xstart, 1); // i = xstart-1; 2610 br(Assembler::MI, L_last_x); 2611 2612 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 2613 ldr(product_hi, Address(rscratch1)); 2614 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 2615 2616 Label L_third_loop_prologue; 2617 bind(L_third_loop_prologue); 2618 2619 str(ylen, Address(sp, wordSize)); 2620 stp(x, xstart, Address(sp, 2 * wordSize)); 2621 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 2622 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 2623 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 2624 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 2625 2626 addw(tmp3, xlen, 1); 2627 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2628 subsw(tmp3, tmp3, 1); 2629 br(Assembler::MI, L_done); 2630 2631 lsr(carry, carry, 32); 2632 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 2633 b(L_second_loop); 2634 2635 // Next infrequent code is moved outside loops. 2636 bind(L_last_x); 2637 ldrw(product_hi, Address(x, 0)); 2638 b(L_third_loop_prologue); 2639 2640 bind(L_done); 2641 } 2642 2643 /** 2644 * Emits code to update CRC-32 with a byte value according to constants in table 2645 * 2646 * @param [in,out]crc Register containing the crc. 2647 * @param [in]val Register containing the byte to fold into the CRC. 2648 * @param [in]table Register containing the table of crc constants. 2649 * 2650 * uint32_t crc; 2651 * val = crc_table[(val ^ crc) & 0xFF]; 2652 * crc = val ^ (crc >> 8); 2653 * 2654 */ 2655 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 2656 eor(val, val, crc); 2657 andr(val, val, 0xff); 2658 ldrw(val, Address(table, val, Address::lsl(2))); 2659 eor(crc, val, crc, Assembler::LSR, 8); 2660 } 2661 2662 /** 2663 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3 2664 * 2665 * @param [in,out]crc Register containing the crc. 2666 * @param [in]v Register containing the 32-bit to fold into the CRC. 2667 * @param [in]table0 Register containing table 0 of crc constants. 2668 * @param [in]table1 Register containing table 1 of crc constants. 2669 * @param [in]table2 Register containing table 2 of crc constants. 2670 * @param [in]table3 Register containing table 3 of crc constants. 2671 * 2672 * uint32_t crc; 2673 * v = crc ^ v 2674 * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24] 2675 * 2676 */ 2677 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp, 2678 Register table0, Register table1, Register table2, Register table3, 2679 bool upper) { 2680 eor(v, crc, v, upper ? 
LSR:LSL, upper ? 32:0); 2681 uxtb(tmp, v); 2682 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 2683 ubfx(tmp, v, 8, 8); 2684 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 2685 eor(crc, crc, tmp); 2686 ubfx(tmp, v, 16, 8); 2687 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 2688 eor(crc, crc, tmp); 2689 ubfx(tmp, v, 24, 8); 2690 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 2691 eor(crc, crc, tmp); 2692 } 2693 2694 /** 2695 * @param crc register containing existing CRC (32-bit) 2696 * @param buf register pointing to input byte buffer (byte*) 2697 * @param len register containing number of bytes 2698 * @param table register that will contain address of CRC table 2699 * @param tmp scratch register 2700 */ 2701 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 2702 Register table0, Register table1, Register table2, Register table3, 2703 Register tmp, Register tmp2, Register tmp3) { 2704 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 2705 unsigned long offset; 2706 2707 ornw(crc, zr, crc); 2708 2709 if (UseCRC32) { 2710 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2711 2712 subs(len, len, 64); 2713 br(Assembler::GE, CRC_by64_loop); 2714 adds(len, len, 64-4); 2715 br(Assembler::GE, CRC_by4_loop); 2716 adds(len, len, 4); 2717 br(Assembler::GT, CRC_by1_loop); 2718 b(L_exit); 2719 2720 BIND(CRC_by4_loop); 2721 ldrw(tmp, Address(post(buf, 4))); 2722 subs(len, len, 4); 2723 crc32w(crc, crc, tmp); 2724 br(Assembler::GE, CRC_by4_loop); 2725 adds(len, len, 4); 2726 br(Assembler::LE, L_exit); 2727 BIND(CRC_by1_loop); 2728 ldrb(tmp, Address(post(buf, 1))); 2729 subs(len, len, 1); 2730 crc32b(crc, crc, tmp); 2731 br(Assembler::GT, CRC_by1_loop); 2732 b(L_exit); 2733 2734 align(CodeEntryAlignment); 2735 BIND(CRC_by64_loop); 2736 subs(len, len, 64); 2737 ldp(tmp, tmp3, Address(post(buf, 16))); 2738 crc32x(crc, crc, tmp); 2739 crc32x(crc, crc, tmp3); 2740 ldp(tmp, tmp3, Address(post(buf, 16))); 2741 crc32x(crc, crc, tmp); 2742 crc32x(crc, crc, tmp3); 2743 ldp(tmp, tmp3, Address(post(buf, 16))); 2744 crc32x(crc, crc, tmp); 2745 crc32x(crc, crc, tmp3); 2746 ldp(tmp, tmp3, Address(post(buf, 16))); 2747 crc32x(crc, crc, tmp); 2748 crc32x(crc, crc, tmp3); 2749 br(Assembler::GE, CRC_by64_loop); 2750 adds(len, len, 64-4); 2751 br(Assembler::GE, CRC_by4_loop); 2752 adds(len, len, 4); 2753 br(Assembler::GT, CRC_by1_loop); 2754 BIND(L_exit); 2755 ornw(crc, zr, crc); 2756 return; 2757 } 2758 2759 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2760 if (offset) add(table0, table0, offset); 2761 add(table1, table0, 1*256*sizeof(juint)); 2762 add(table2, table0, 2*256*sizeof(juint)); 2763 add(table3, table0, 3*256*sizeof(juint)); 2764 2765 if (UseNeon) { 2766 cmp(len, 64); 2767 br(Assembler::LT, L_by16); 2768 eor(v16, T16B, v16, v16); 2769 2770 Label L_fold; 2771 2772 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 2773 2774 ld1(v0, v1, T2D, post(buf, 32)); 2775 ld1r(v4, T2D, post(tmp, 8)); 2776 ld1r(v5, T2D, post(tmp, 8)); 2777 ld1r(v6, T2D, post(tmp, 8)); 2778 ld1r(v7, T2D, post(tmp, 8)); 2779 mov(v16, T4S, 0, crc); 2780 2781 eor(v0, T16B, v0, v16); 2782 sub(len, len, 64); 2783 2784 BIND(L_fold); 2785 pmull(v22, T8H, v0, v5, T8B); 2786 pmull(v20, T8H, v0, v7, T8B); 2787 pmull(v23, T8H, v0, v4, T8B); 2788 pmull(v21, T8H, v0, v6, T8B); 2789 2790 pmull2(v18, T8H, v0, v5, T16B); 2791 pmull2(v16, T8H, v0, v7, T16B); 2792 pmull2(v19, T8H, v0, v4, T16B); 2793 pmull2(v17, T8H, v0, v6, T16B); 2794 2795 uzp1(v24, v20, 
v22, T8H); 2796 uzp2(v25, v20, v22, T8H); 2797 eor(v20, T16B, v24, v25); 2798 2799 uzp1(v26, v16, v18, T8H); 2800 uzp2(v27, v16, v18, T8H); 2801 eor(v16, T16B, v26, v27); 2802 2803 ushll2(v22, T4S, v20, T8H, 8); 2804 ushll(v20, T4S, v20, T4H, 8); 2805 2806 ushll2(v18, T4S, v16, T8H, 8); 2807 ushll(v16, T4S, v16, T4H, 8); 2808 2809 eor(v22, T16B, v23, v22); 2810 eor(v18, T16B, v19, v18); 2811 eor(v20, T16B, v21, v20); 2812 eor(v16, T16B, v17, v16); 2813 2814 uzp1(v17, v16, v20, T2D); 2815 uzp2(v21, v16, v20, T2D); 2816 eor(v17, T16B, v17, v21); 2817 2818 ushll2(v20, T2D, v17, T4S, 16); 2819 ushll(v16, T2D, v17, T2S, 16); 2820 2821 eor(v20, T16B, v20, v22); 2822 eor(v16, T16B, v16, v18); 2823 2824 uzp1(v17, v20, v16, T2D); 2825 uzp2(v21, v20, v16, T2D); 2826 eor(v28, T16B, v17, v21); 2827 2828 pmull(v22, T8H, v1, v5, T8B); 2829 pmull(v20, T8H, v1, v7, T8B); 2830 pmull(v23, T8H, v1, v4, T8B); 2831 pmull(v21, T8H, v1, v6, T8B); 2832 2833 pmull2(v18, T8H, v1, v5, T16B); 2834 pmull2(v16, T8H, v1, v7, T16B); 2835 pmull2(v19, T8H, v1, v4, T16B); 2836 pmull2(v17, T8H, v1, v6, T16B); 2837 2838 ld1(v0, v1, T2D, post(buf, 32)); 2839 2840 uzp1(v24, v20, v22, T8H); 2841 uzp2(v25, v20, v22, T8H); 2842 eor(v20, T16B, v24, v25); 2843 2844 uzp1(v26, v16, v18, T8H); 2845 uzp2(v27, v16, v18, T8H); 2846 eor(v16, T16B, v26, v27); 2847 2848 ushll2(v22, T4S, v20, T8H, 8); 2849 ushll(v20, T4S, v20, T4H, 8); 2850 2851 ushll2(v18, T4S, v16, T8H, 8); 2852 ushll(v16, T4S, v16, T4H, 8); 2853 2854 eor(v22, T16B, v23, v22); 2855 eor(v18, T16B, v19, v18); 2856 eor(v20, T16B, v21, v20); 2857 eor(v16, T16B, v17, v16); 2858 2859 uzp1(v17, v16, v20, T2D); 2860 uzp2(v21, v16, v20, T2D); 2861 eor(v16, T16B, v17, v21); 2862 2863 ushll2(v20, T2D, v16, T4S, 16); 2864 ushll(v16, T2D, v16, T2S, 16); 2865 2866 eor(v20, T16B, v22, v20); 2867 eor(v16, T16B, v16, v18); 2868 2869 uzp1(v17, v20, v16, T2D); 2870 uzp2(v21, v20, v16, T2D); 2871 eor(v20, T16B, v17, v21); 2872 2873 shl(v16, T2D, v28, 1); 2874 shl(v17, T2D, v20, 1); 2875 2876 eor(v0, T16B, v0, v16); 2877 eor(v1, T16B, v1, v17); 2878 2879 subs(len, len, 32); 2880 br(Assembler::GE, L_fold); 2881 2882 mov(crc, 0); 2883 mov(tmp, v0, T1D, 0); 2884 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2885 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2886 mov(tmp, v0, T1D, 1); 2887 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2888 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2889 mov(tmp, v1, T1D, 0); 2890 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2891 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2892 mov(tmp, v1, T1D, 1); 2893 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2894 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2895 2896 add(len, len, 32); 2897 } 2898 2899 BIND(L_by16); 2900 subs(len, len, 16); 2901 br(Assembler::GE, L_by16_loop); 2902 adds(len, len, 16-4); 2903 br(Assembler::GE, L_by4_loop); 2904 adds(len, len, 4); 2905 br(Assembler::GT, L_by1_loop); 2906 b(L_exit); 2907 2908 BIND(L_by4_loop); 2909 ldrw(tmp, Address(post(buf, 4))); 2910 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 2911 subs(len, len, 4); 2912 br(Assembler::GE, L_by4_loop); 2913 adds(len, len, 4); 2914 br(Assembler::LE, L_exit); 2915 BIND(L_by1_loop); 2916 subs(len, len, 1); 2917 ldrb(tmp, Address(post(buf, 1))); 2918 update_byte_crc32(crc, tmp, table0); 2919 
br(Assembler::GT, L_by1_loop); 2920 b(L_exit); 2921 2922 align(CodeEntryAlignment); 2923 BIND(L_by16_loop); 2924 subs(len, len, 16); 2925 ldp(tmp, tmp3, Address(post(buf, 16))); 2926 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2927 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2928 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 2929 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 2930 br(Assembler::GE, L_by16_loop); 2931 adds(len, len, 16-4); 2932 br(Assembler::GE, L_by4_loop); 2933 adds(len, len, 4); 2934 br(Assembler::GT, L_by1_loop); 2935 BIND(L_exit); 2936 ornw(crc, zr, crc); 2937 } 2938 2939 /** 2940 * @param crc register containing existing CRC (32-bit) 2941 * @param buf register pointing to input byte buffer (byte*) 2942 * @param len register containing number of bytes 2943 * @param table register that will contain address of CRC table 2944 * @param tmp scratch register 2945 */ 2946 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 2947 Register table0, Register table1, Register table2, Register table3, 2948 Register tmp, Register tmp2, Register tmp3) { 2949 Label L_exit; 2950 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2951 2952 subs(len, len, 64); 2953 br(Assembler::GE, CRC_by64_loop); 2954 adds(len, len, 64-4); 2955 br(Assembler::GE, CRC_by4_loop); 2956 adds(len, len, 4); 2957 br(Assembler::GT, CRC_by1_loop); 2958 b(L_exit); 2959 2960 BIND(CRC_by4_loop); 2961 ldrw(tmp, Address(post(buf, 4))); 2962 subs(len, len, 4); 2963 crc32cw(crc, crc, tmp); 2964 br(Assembler::GE, CRC_by4_loop); 2965 adds(len, len, 4); 2966 br(Assembler::LE, L_exit); 2967 BIND(CRC_by1_loop); 2968 ldrb(tmp, Address(post(buf, 1))); 2969 subs(len, len, 1); 2970 crc32cb(crc, crc, tmp); 2971 br(Assembler::GT, CRC_by1_loop); 2972 b(L_exit); 2973 2974 align(CodeEntryAlignment); 2975 BIND(CRC_by64_loop); 2976 subs(len, len, 64); 2977 ldp(tmp, tmp3, Address(post(buf, 16))); 2978 crc32cx(crc, crc, tmp); 2979 crc32cx(crc, crc, tmp3); 2980 ldp(tmp, tmp3, Address(post(buf, 16))); 2981 crc32cx(crc, crc, tmp); 2982 crc32cx(crc, crc, tmp3); 2983 ldp(tmp, tmp3, Address(post(buf, 16))); 2984 crc32cx(crc, crc, tmp); 2985 crc32cx(crc, crc, tmp3); 2986 ldp(tmp, tmp3, Address(post(buf, 16))); 2987 crc32cx(crc, crc, tmp); 2988 crc32cx(crc, crc, tmp3); 2989 br(Assembler::GE, CRC_by64_loop); 2990 adds(len, len, 64-4); 2991 br(Assembler::GE, CRC_by4_loop); 2992 adds(len, len, 4); 2993 br(Assembler::GT, CRC_by1_loop); 2994 BIND(L_exit); 2995 return; 2996 } 2997 2998 SkipIfEqual::SkipIfEqual( 2999 MacroAssembler* masm, const bool* flag_addr, bool value) { 3000 _masm = masm; 3001 unsigned long offset; 3002 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 3003 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 3004 _masm->cbzw(rscratch1, _label); 3005 } 3006 3007 SkipIfEqual::~SkipIfEqual() { 3008 _masm->bind(_label); 3009 } 3010 3011 void MacroAssembler::cmpptr(Register src1, Address src2) { 3012 unsigned long offset; 3013 adrp(rscratch1, src2, offset); 3014 ldr(rscratch1, Address(rscratch1, offset)); 3015 cmp(src1, rscratch1); 3016 } 3017 3018 void MacroAssembler::store_check(Register obj, Address dst) { 3019 store_check(obj); 3020 } 3021 3022 void MacroAssembler::store_check(Register obj) { 3023 // Does a store check for the oop in register obj. The content of 3024 // register obj is destroyed afterwards. 

  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  lsr(obj, obj, CardTableModRefBS::card_shift);

  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");

  {
    ExternalAddress cardtable((address) ct->byte_map_base);
    unsigned long offset;
    adrp(rscratch1, cardtable, offset);
    assert(offset == 0, "byte_map_base is misaligned");
  }

  if (UseCondCardMark) {
    Label L_already_dirty;
    ldrb(rscratch2, Address(obj, rscratch1));
    cbz(rscratch2, L_already_dirty);
    strb(zr, Address(obj, rscratch1));
    bind(L_already_dirty);
  } else {
    strb(zr, Address(obj, rscratch1));
  }
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (Universe::narrow_klass_base() == NULL) {
      cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
      return;
    } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
               && Universe::narrow_klass_shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::load_prototype_header(Register dst, Register src) {
  load_klass(dst, src);
  ldr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release?  concurrent GCs assume the
  // klass length is valid if the klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(s, "broken oop in encode_heap_oop");
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /*  Old algorithm: is this any worse?
3124 Label nonnull; 3125 cbnz(r, nonnull); 3126 sub(r, r, rheapbase); 3127 bind(nonnull); 3128 lsr(r, r, LogMinObjAlignmentInBytes); 3129 */ 3130 } 3131 } 3132 3133 void MacroAssembler::encode_heap_oop_not_null(Register r) { 3134 #ifdef ASSERT 3135 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 3136 if (CheckCompressedOops) { 3137 Label ok; 3138 cbnz(r, ok); 3139 stop("null oop passed to encode_heap_oop_not_null"); 3140 bind(ok); 3141 } 3142 #endif 3143 verify_oop(r, "broken oop in encode_heap_oop_not_null"); 3144 if (Universe::narrow_oop_base() != NULL) { 3145 sub(r, r, rheapbase); 3146 } 3147 if (Universe::narrow_oop_shift() != 0) { 3148 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3149 lsr(r, r, LogMinObjAlignmentInBytes); 3150 } 3151 } 3152 3153 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 3154 #ifdef ASSERT 3155 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 3156 if (CheckCompressedOops) { 3157 Label ok; 3158 cbnz(src, ok); 3159 stop("null oop passed to encode_heap_oop_not_null2"); 3160 bind(ok); 3161 } 3162 #endif 3163 verify_oop(src, "broken oop in encode_heap_oop_not_null2"); 3164 3165 Register data = src; 3166 if (Universe::narrow_oop_base() != NULL) { 3167 sub(dst, src, rheapbase); 3168 data = dst; 3169 } 3170 if (Universe::narrow_oop_shift() != 0) { 3171 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3172 lsr(dst, data, LogMinObjAlignmentInBytes); 3173 data = dst; 3174 } 3175 if (data == src) 3176 mov(dst, src); 3177 } 3178 3179 void MacroAssembler::decode_heap_oop(Register d, Register s) { 3180 #ifdef ASSERT 3181 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 3182 #endif 3183 if (Universe::narrow_oop_base() == NULL) { 3184 if (Universe::narrow_oop_shift() != 0 || d != s) { 3185 lsl(d, s, Universe::narrow_oop_shift()); 3186 } 3187 } else { 3188 Label done; 3189 if (d != s) 3190 mov(d, s); 3191 cbz(s, done); 3192 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 3193 bind(done); 3194 } 3195 verify_oop(d, "broken oop in decode_heap_oop"); 3196 } 3197 3198 void MacroAssembler::decode_heap_oop_not_null(Register r) { 3199 assert (UseCompressedOops, "should only be used for compressed headers"); 3200 assert (Universe::heap() != NULL, "java heap should be initialized"); 3201 // Cannot assert, unverified entry point counts instructions (see .ad file) 3202 // vtableStubs also counts instructions in pd_code_size_limit. 3203 // Also do not verify_oop as this is called by verify_oop. 3204 if (Universe::narrow_oop_shift() != 0) { 3205 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3206 if (Universe::narrow_oop_base() != NULL) { 3207 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3208 } else { 3209 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3210 } 3211 } else { 3212 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3213 } 3214 } 3215 3216 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 3217 assert (UseCompressedOops, "should only be used for compressed headers"); 3218 assert (Universe::heap() != NULL, "java heap should be initialized"); 3219 // Cannot assert, unverified entry point counts instructions (see .ad file) 3220 // vtableStubs also counts instructions in pd_code_size_limit. 3221 // Also do not verify_oop as this is called by verify_oop. 
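  // e.g. (illustrative) with narrow_oop_shift() == 3 a compressed oop
  // 0x12345678 decodes to rheapbase + (0x12345678 << 3), i.e.
  // rheapbase + 0x91a2b3c0, or to just the shifted value when there is
  // no base.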
3222 if (Universe::narrow_oop_shift() != 0) { 3223 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3224 if (Universe::narrow_oop_base() != NULL) { 3225 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3226 } else { 3227 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3228 } 3229 } else { 3230 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3231 if (dst != src) { 3232 mov(dst, src); 3233 } 3234 } 3235 } 3236 3237 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3238 if (Universe::narrow_klass_base() == NULL) { 3239 if (Universe::narrow_klass_shift() != 0) { 3240 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3241 lsr(dst, src, LogKlassAlignmentInBytes); 3242 } else { 3243 if (dst != src) mov(dst, src); 3244 } 3245 return; 3246 } 3247 3248 if (use_XOR_for_compressed_class_base) { 3249 if (Universe::narrow_klass_shift() != 0) { 3250 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3251 lsr(dst, dst, LogKlassAlignmentInBytes); 3252 } else { 3253 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3254 } 3255 return; 3256 } 3257 3258 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3259 && Universe::narrow_klass_shift() == 0) { 3260 movw(dst, src); 3261 return; 3262 } 3263 3264 #ifdef ASSERT 3265 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 3266 #endif 3267 3268 Register rbase = dst; 3269 if (dst == src) rbase = rheapbase; 3270 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3271 sub(dst, src, rbase); 3272 if (Universe::narrow_klass_shift() != 0) { 3273 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3274 lsr(dst, dst, LogKlassAlignmentInBytes); 3275 } 3276 if (dst == src) reinit_heapbase(); 3277 } 3278 3279 void MacroAssembler::encode_klass_not_null(Register r) { 3280 encode_klass_not_null(r, r); 3281 } 3282 3283 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3284 Register rbase = dst; 3285 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3286 3287 if (Universe::narrow_klass_base() == NULL) { 3288 if (Universe::narrow_klass_shift() != 0) { 3289 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3290 lsl(dst, src, LogKlassAlignmentInBytes); 3291 } else { 3292 if (dst != src) mov(dst, src); 3293 } 3294 return; 3295 } 3296 3297 if (use_XOR_for_compressed_class_base) { 3298 if (Universe::narrow_klass_shift() != 0) { 3299 lsl(dst, src, LogKlassAlignmentInBytes); 3300 eor(dst, dst, (uint64_t)Universe::narrow_klass_base()); 3301 } else { 3302 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3303 } 3304 return; 3305 } 3306 3307 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3308 && Universe::narrow_klass_shift() == 0) { 3309 if (dst != src) 3310 movw(dst, src); 3311 movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32); 3312 return; 3313 } 3314 3315 // Cannot assert, unverified entry point counts instructions (see .ad file) 3316 // vtableStubs also counts instructions in pd_code_size_limit. 3317 // Also do not verify_oop as this is called by verify_oop. 
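  // (When dst aliases src, rheapbase is borrowed below as the scratch
  //  base register and restored afterwards by reinit_heapbase().)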
3318 if (dst == src) rbase = rheapbase; 3319 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3320 if (Universe::narrow_klass_shift() != 0) { 3321 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3322 add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); 3323 } else { 3324 add(dst, rbase, src); 3325 } 3326 if (dst == src) reinit_heapbase(); 3327 } 3328 3329 void MacroAssembler::decode_klass_not_null(Register r) { 3330 decode_klass_not_null(r, r); 3331 } 3332 3333 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 3334 assert (UseCompressedOops, "should only be used for compressed oops"); 3335 assert (Universe::heap() != NULL, "java heap should be initialized"); 3336 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3337 3338 int oop_index = oop_recorder()->find_index(obj); 3339 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3340 3341 InstructionMark im(this); 3342 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3343 code_section()->relocate(inst_mark(), rspec); 3344 movz(dst, 0xDEAD, 16); 3345 movk(dst, 0xBEEF); 3346 } 3347 3348 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 3349 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3350 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3351 int index = oop_recorder()->find_index(k); 3352 assert(! Universe::heap()->is_in_reserved(k), "should not be an oop"); 3353 3354 InstructionMark im(this); 3355 RelocationHolder rspec = metadata_Relocation::spec(index); 3356 code_section()->relocate(inst_mark(), rspec); 3357 narrowKlass nk = Klass::encode_klass(k); 3358 movz(dst, (nk >> 16), 16); 3359 movk(dst, nk & 0xffff); 3360 } 3361 3362 void MacroAssembler::load_heap_oop(Register dst, Address src) 3363 { 3364 if (UseCompressedOops) { 3365 ldrw(dst, src); 3366 decode_heap_oop(dst); 3367 } else { 3368 ldr(dst, src); 3369 } 3370 } 3371 3372 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) 3373 { 3374 if (UseCompressedOops) { 3375 ldrw(dst, src); 3376 decode_heap_oop_not_null(dst); 3377 } else { 3378 ldr(dst, src); 3379 } 3380 } 3381 3382 void MacroAssembler::store_heap_oop(Address dst, Register src) { 3383 if (UseCompressedOops) { 3384 assert(!dst.uses(src), "not enough registers"); 3385 encode_heap_oop(src); 3386 strw(src, dst); 3387 } else 3388 str(src, dst); 3389 } 3390 3391 // Used for storing NULLs. 3392 void MacroAssembler::store_heap_oop_null(Address dst) { 3393 if (UseCompressedOops) { 3394 strw(zr, dst); 3395 } else 3396 str(zr, dst); 3397 } 3398 3399 #if INCLUDE_ALL_GCS 3400 void MacroAssembler::g1_write_barrier_pre(Register obj, 3401 Register pre_val, 3402 Register thread, 3403 Register tmp, 3404 bool tosca_live, 3405 bool expand_call) { 3406 // If expand_call is true then we expand the call_VM_leaf macro 3407 // directly to skip generating the check by 3408 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 
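  // (This is G1's snapshot-at-the-beginning pre-barrier: while
  //  concurrent marking is active, the value about to be overwritten is
  //  logged in the thread-local SATB queue so the marker still sees it.)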

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg)
    assert_different_registers(obj, pre_val, tmp);

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    ldrb(tmp, in_progress);
  }
  cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  cbz(pre_val, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ldr(tmp, index);                      // tmp := *index_adr
  cbz(tmp, runtime);                    // tmp == 0? If yes, goto runtime

  sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  str(tmp, index);                      // *index_adr := tmp
  ldr(rscratch1, buffer);
  add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  str(pre_val, Address(tmp, 0));
  b(done);

  bind(runtime);
  // save the live input values
  push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
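  //
  // Either way the call lands in SharedRuntime::g1_wb_pre(pre_val, thread);
  // only the generated call sequence differs.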

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    pass_arg1(this, thread);
    pass_arg0(this, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  bind(done);
}

void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register thread,
                                           Register tmp,
                                           Register tmp2) {
  assert(thread == rthread, "must be");

  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  Label done;
  Label runtime;

  // Does the store cross heap regions?

  eor(tmp, store_addr, new_val);
  lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
  cbz(tmp, done);

  // Crosses regions, storing NULL?

  cbz(new_val, done);

  // Storing region-crossing non-NULL, is the card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  const Register card_addr = tmp;

  lsr(card_addr, store_addr, CardTableModRefBS::card_shift);

  unsigned long offset;
  adrp(tmp2, cardtable, offset);

  // get the address of the card
  add(card_addr, card_addr, tmp2);
  ldrb(tmp2, Address(card_addr, offset));
  cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  br(Assembler::EQ, done);

  assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

  membar(Assembler::StoreLoad);

  ldrb(tmp2, Address(card_addr, offset));
  cbzw(tmp2, done);

  // Storing a region-crossing, non-NULL oop and the card is clean:
  // dirty the card and log it.

  strb(zr, Address(card_addr, offset));

  ldr(rscratch1, queue_index);
  cbz(rscratch1, runtime);
  sub(rscratch1, rscratch1, wordSize);
  str(rscratch1, queue_index);

  ldr(tmp2, buffer);
  str(card_addr, Address(tmp2, rscratch1));
  b(done);

  bind(runtime);
  // save the live input values
  push(store_addr->bit(true) | new_val->bit(true), sp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(store_addr->bit(true) | new_val->bit(true), sp);

  bind(done);
}

#endif // INCLUDE_ALL_GCS

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return Address((address)obj, rspec);
}

// Move an oop into a register. immediate is true if we want immediate
// instructions, i.e. we are not going to patch this instruction while the
// code is being executed by another thread. In that case we can use move
// immediates rather than the constant pool.
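// For example, movoop(dst, obj, /*immediate*/ false) materializes the oop
// through a patchable constant-pool load, while immediate == true emits a
// move-immediate sequence directly.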
void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  if (! immediate) {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr_constant(dst, Address(dummy, rspec));
  } else
    mov(dst, Address((address)obj, rspec));
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes));
  }
  ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  cmp(end, rscratch1);
  br(Assembler::HI, slow_case);

  // update the tlab top pointer
  str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Preserves r19 and r3.
Register MacroAssembler::tlab_refill(Label& retry,
                                     Label& try_eden,
                                     Label& slow_case) {
  Register top = r0;
  Register t1  = r2;
  Register t2  = r4;
  assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    b(slow_case);
  }

  ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  ldr(t1,  Address(rthread, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  sub(t1, t1, top);
  lsr(t1, t1, LogHeapWordSize);

  // Retain the tlab and allocate the object in the shared space if
  // the amount free in the tlab is too large to discard.
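  //
  // That is, roughly:
  //
  //   free = tlab_end - tlab_top;             // in heap words
  //   if (free > tlab_refill_waste_limit) {
  //     tlab_refill_waste_limit += increment; // retain the TLAB
  //     goto try_eden;                        // allocate this object in eden
  //   } else {
  //     discard the TLAB (overwrite it with a filler int[]);
  //     goto do_refill;                       // allocate a fresh TLAB in eden
  //   }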

  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  cmp(t1, rscratch1);
  br(Assembler::LE, discard_tlab);

  // Retain
  // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  add(rscratch1, rscratch1, t2);
  str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));

  if (TLABStats) {
    // increment number of slow_allocations
    addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
          1, rscratch1);
  }
  b(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
          rscratch1);
    // accumulate wastage -- t1 is amount free in tlab
    addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
          rscratch1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  cbz(top, do_refill);

  // set up the mark word
  mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
  // set the length to the remaining space
  sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
  add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
  strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
  // set klass to intArrayKlass
  {
    unsigned long offset;
    // dubious reloc why not an oop reloc?
    adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
         offset);
    ldr(t1, Address(rscratch1, offset));
  }
  // store klass last. concurrent GCs assume klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  mov(t1, top);
  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  sub(t1, t1, rscratch1);
  incr_allocated_bytes(rthread, t1, 0, rscratch1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
  lsl(t1, t1, LogHeapWordSize);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
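  // (i.e. assert below that t1 still equals tlab_size << LogHeapWordSize)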
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = r4;
    assert_different_registers(tsize, rthread, t1);
    str(tsize, Address(pre(sp, -16)));
    ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
    lsl(tsize, tsize, LogHeapWordSize);
    cmp(t1, tsize);
    br(Assembler::EQ, ok);
    STOP("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    ldr(tsize, Address(post(sp, 16)));
  }
#endif
  str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  add(top, top, t1);
  sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
  verify_tlab();
  b(retry);

  return rthread; // for use by caller
}

// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    bind(retry);
    {
      unsigned long offset;
      adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP. add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      add(rscratch1, rscratch1, offset);
      ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    cmp(end, obj);
    br(Assembler::LO, slow_case);

    cmp(end, heap_end);
    br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
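    // The ldaxr above and the stlxr below form, in effect (a sketch):
    //
    //   do {
    //     obj = load_acquire_exclusive(heap_top);
    //     end = obj + size;
    //     if (end < obj || end > heap_end) goto slow_case;
    //   } while (!store_release_exclusive(heap_top, end));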
    stlxr(rscratch1, end, rscratch1);
    cbnzw(rscratch1, retry);
  }
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to the stack successive pages until the offset is reached, to check
// for stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because a large size can bang beyond the yellow
  // and red zones.
  Label loop;
  mov(rscratch1, os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < StackShadowPages-1; i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    lea(tmp, Address(tmp, -os::vm_page_size()));
    str(size, Address(tmp));
  }
}


address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
  unsigned long off;
  adrp(r, Address(page, rtype), off);
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, off));
  return inst_mark();
}

address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, 0));
  return inst_mark();
}

void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  if (uabs(pc() - dest.target()) >= (1LL << 32)) {
    guarantee(rtype == relocInfo::none
              || rtype == relocInfo::external_word_type
              || rtype == relocInfo::poll_type
              || rtype == relocInfo::poll_return_type,
              "can only use a fixed address with an ADRP");
    // Out of range.
    // This doesn't happen very often, but we have to handle it.
    mov(reg1, dest);
    byte_offset = 0;
  } else {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), dest.rspec());
    byte_offset = (uint64_t)dest.target() & 0xfff;
    _adrp(reg1, dest.target());
  }
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
}


// Search for str1 in str2 and return index or -1
void MacroAssembler::string_indexof(Register str2, Register str1,
                                    Register cnt2, Register cnt1,
                                    Register tmp1, Register tmp2,
                                    Register tmp3, Register tmp4,
                                    int icnt1, Register result) {
  Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;

  Register ch1 = rscratch1;
  Register ch2 = rscratch2;
  Register cnt1tmp = tmp1;
  Register cnt2tmp = tmp2;
  Register cnt1_neg = cnt1;
  Register cnt2_neg = cnt2;
  Register result_tmp = tmp4;

  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;

  // We have two strings, a source string in str2, cnt2 and a pattern string
  // in str1, cnt1. Find the first occurrence of the pattern in the source
  // or return -1.

  // For a larger pattern and source we use a simplified Boyer-Moore
  // algorithm. With a small pattern and source we use a linear scan.

  if (icnt1 == -1) {
    cmp(cnt1, 256);             // Use linear scan if cnt1 < 8 || cnt1 >= 256
    ccmp(cnt1, 8, 0b0000, LO);  // Can't handle skip >= 256 because we use
    br(LO, LINEARSEARCH);       // a byte array.
    cmp(cnt1, cnt2, LSR, 2);    // Source must be 4 * pattern for BM
    br(HS, LINEARSEARCH);
  }

  // The Boyer-Moore algorithm is based on the description here:
  //
  // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
  //
  // This describes an algorithm with two shift rules: the 'Bad Character'
  // rule and the 'Good Suffix' rule.
  //
  // These rules are essentially heuristics for how far we can shift the
  // pattern along the search string.
  //
  // The implementation here uses the 'Bad Character' rule only because of
  // the complexity of initialisation for the 'Good Suffix' rule.
  //
  // This is also known as the Boyer-Moore-Horspool algorithm:
  //
  // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
  //
  // #define ASIZE 128
  //
  // int bm(unsigned char *x, int m, unsigned char *y, int n) {
  //   int i, j;
  //   unsigned c;
  //   unsigned char bc[ASIZE];
  //
  //   /* Preprocessing */
  //   for (i = 0; i < ASIZE; ++i)
  //     bc[i] = 0;
  //   for (i = 0; i < m - 1; ) {
  //     c = x[i];
  //     ++i;
  //     if (c < ASIZE) bc[c] = i;
  //   }
  //
  //   /* Searching */
  //   j = 0;
  //   while (j <= n - m) {
  //     c = y[j + m - 1];     // last char of the current window
  //     if (x[m-1] == c) {
  //       for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i);
  //       if (i < 0) return j;
  //     }
  //     if (c < ASIZE)
  //       j = j - bc[c] + m;
  //     else
  //       j += 1; // Advance by 1 only if char >= ASIZE
  //   }
  // }

  if (icnt1 == -1) {
    BIND(BM);

    Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP;
    Label BMADV, BMMATCH, BMCHECKEND;

    Register cnt1end = tmp2;
    Register str2end = cnt2;
    Register skipch = tmp2;

    // Restrict ASIZE to 128 to reduce stack space/initialisation.
    // The presence of chars >= ASIZE in the target string does not affect
    // performance, but we must be careful not to initialise them in the
    // stack array.
    // The presence of chars >= ASIZE in the source string may adversely
    // affect performance since we can only advance by one when we encounter
    // one.

    stp(zr, zr, pre(sp, -128));
    for (int i = 1; i < 8; i++)
      stp(zr, zr, Address(sp, i*16));

    mov(cnt1tmp, 0);
    sub(cnt1end, cnt1, 1);
    BIND(BCLOOP);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    cmp(ch1, 128);
    add(cnt1tmp, cnt1tmp, 1);
    br(HS, BCSKIP);
    strb(cnt1tmp, Address(sp, ch1));
    BIND(BCSKIP);
    cmp(cnt1tmp, cnt1end);
    br(LT, BCLOOP);

    mov(result_tmp, str2);

    sub(cnt2, cnt2, cnt1);
    add(str2end, str2, cnt2, LSL, 1);
    BIND(BMLOOPSTR2);
    sub(cnt1tmp, cnt1, 1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, skipch);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(LT, BMMATCH);
    BIND(BMLOOPSTR1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, ch2);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(GE, BMLOOPSTR1);
    BIND(BMMATCH);
    sub(result_tmp, str2, result_tmp);
    lsr(result, result_tmp, 1);
    add(sp, sp, 128);
    b(DONE);
    BIND(BMADV);
    add(str2, str2, 2);
    b(BMCHECKEND);
    BIND(BMSKIP);
    cmp(skipch, 128);
    br(HS, BMADV);
    ldrb(ch2, Address(sp, skipch));
    add(str2, str2, cnt1, LSL, 1);
    sub(str2, str2, ch2, LSL, 1);
    BIND(BMCHECKEND);
    cmp(str2, str2end);
    br(LE, BMLOOPSTR2);
    add(sp, sp, 128);
    b(NOMATCH);
  }

  BIND(LINEARSEARCH);
  {
    Label DO1, DO2, DO3;

    Register str2tmp = tmp2;
    Register first = tmp3;

    if (icnt1 == -1)
    {
      Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD;

      cmp(cnt1, 4);
      br(LT, DOSHORT);

      sub(cnt2, cnt2, cnt1);
      sub(cnt1, cnt1, 4);
      mov(result_tmp, cnt2);

      lea(str1, Address(str1, cnt1, Address::uxtw(1)));
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt1_neg, zr, cnt1, LSL, 1);
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      ldr(first, Address(str1, cnt1_neg));

      BIND(FIRST_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      adds(cnt1tmp, cnt1_neg, 8);
      add(cnt2tmp, cnt2_neg, 8);
      br(GE, LAST_WORD);

      BIND(STR1_NEXT);
      ldr(ch1, Address(str1, cnt1tmp));
      ldr(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      adds(cnt1tmp, cnt1tmp, 8);
      add(cnt2tmp, cnt2tmp, 8);
      br(LT, STR1_NEXT);

      BIND(LAST_WORD);
      ldr(ch1, Address(str1));
      sub(str2tmp, str2, cnt1_neg);         // adjust to corresponding
      ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);

      BIND(DOSHORT);
      cmp(cnt1, 2);
      br(LT, DO1);
      br(GT, DO3);
    }

    if (icnt1 == 4) {
      Label CH1_LOOP;

      ldr(ch1, str1);
      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 2) {
      Label CH1_LOOP;

      BIND(DO2);
      ldrw(ch1, str1);
      sub(cnt2, cnt2, 2);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 3) {
      Label FIRST_LOOP, STR2_NEXT, STR1_LOOP;

      BIND(DO3);
      ldrw(first, str1);
      ldrh(ch1, Address(str1, 4));

      sub(cnt2, cnt2, 3);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(FIRST_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmpw(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      add(cnt2tmp, cnt2_neg, 4);
      ldrh(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);
    }

    if (icnt1 == -1 || icnt1 == 1) {
      Label CH1_LOOP, HAS_ZERO;
      Label DO1_SHORT, DO1_LOOP;

      BIND(DO1);
      ldrh(ch1, str1);
      cmp(cnt2, 4);
      br(LT, DO1_SHORT);

      orr(ch1, ch1, ch1, LSL, 16);
      orr(ch1, ch1, ch1, LSL, 32);

      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      mov(tmp3, 0x0001000100010001);
      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      eor(ch2, ch1, ch2);
      sub(tmp1, ch2, tmp3);
      orr(tmp2, ch2, 0x7fff7fff7fff7fff);
      bics(tmp1, tmp1, tmp2);
      br(NE, HAS_ZERO);
      adds(cnt2_neg, cnt2_neg, 8);
      br(LT, CH1_LOOP);

      cmp(cnt2_neg, 8);
      mov(cnt2_neg, 0);
      br(LT, CH1_LOOP);
      b(NOMATCH);

      BIND(HAS_ZERO);
      rev(tmp1, tmp1);
      clz(tmp1, tmp1);
      add(cnt2_neg, cnt2_neg, tmp1, LSR, 3);
      b(MATCH);

      BIND(DO1_SHORT);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      BIND(DO1_LOOP);
      ldrh(ch2, Address(str2, cnt2_neg));
      cmpw(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LT, DO1_LOOP);
    }
  }
  BIND(NOMATCH);
  mov(result, -1);
  b(DONE);
  BIND(MATCH);
  add(result, result_tmp, cnt2_neg, ASR, 1);
  BIND(DONE);
}

// Compare strings.
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    Register tmp1) {
  Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD, DIFFERENCE;

  BLOCK_COMMENT("string_compare {");

  // Compute the minimum of the string lengths and save the difference.
  subsw(tmp1, cnt1, cnt2);
  cselw(cnt2, cnt1, cnt2, Assembler::LE); // min

  // A very short string
  cmpw(cnt2, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, LENGTH_DIFF);

  // Compare longwords
  {
    subw(cnt2, cnt2, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt2, Address::uxtw(1)));
    lea(str2, Address(str2, cnt2, Address::uxtw(1)));
    sub(cnt2, zr, cnt2, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(result, Address(str1, cnt2));
    ldr(cnt1, Address(str2, cnt2));
    adds(cnt2, cnt2, wordSize);
    eor(rscratch2, result, cnt1);
    cbnz(rscratch2, DIFFERENCE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword. In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(result, Address(str1));
    ldr(cnt1, Address(str2));
    eor(rscratch2, result, cnt1);
    cbz(rscratch2, LENGTH_DIFF);

    // Find the first different characters in the longwords and
    // compute their difference.
    bind(DIFFERENCE);
    rev(rscratch2, rscratch2);
    clz(rscratch2, rscratch2);
    andr(rscratch2, rscratch2, -16);
    lsrv(result, result, rscratch2);
    uxthw(result, result);
    lsrv(cnt1, cnt1, rscratch2);
    uxthw(cnt1, cnt1);
    subw(result, result, cnt1);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the minimum length zero?
  cbz(cnt2, LENGTH_DIFF);

  bind(SHORT_LOOP);
  load_unsigned_short(result, Address(post(str1, 2)));
  load_unsigned_short(cnt1, Address(post(str2, 2)));
  subw(result, result, cnt1);
  cbnz(result, DONE);
  sub(cnt2, cnt2, 1);
  cbnz(cnt2, SHORT_LOOP);

  // Strings are equal up to min length. Return the length difference.
  bind(LENGTH_DIFF);
  mov(result, tmp1);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_compare");
}


void MacroAssembler::string_equals(Register str1, Register str2,
                                   Register cnt, Register result,
                                   Register tmp1) {
  Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD;

  const Register tmp2 = rscratch1;
  assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);

  BLOCK_COMMENT("string_equals {");

  // Start by assuming that the strings are not equal.
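  // Any early exit through DONE then reports "not equal"; only the
  // SAME_CHARS path below overwrites result with true.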
  mov(result, zr);

  // A very short string
  cmpw(cnt, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, SAME_CHARS);

  // Compare longwords
  {
    subw(cnt, cnt, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt, Address::uxtw(1)));
    lea(str2, Address(str2, cnt, Address::uxtw(1)));
    sub(cnt, zr, cnt, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(tmp1, Address(str1, cnt));
    ldr(tmp2, Address(str2, cnt));
    adds(cnt, cnt, wordSize);
    eor(rscratch2, tmp1, tmp2);
    cbnz(rscratch2, DONE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword. In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(tmp1, Address(str1));
    ldr(tmp2, Address(str2));
    eor(rscratch2, tmp1, tmp2);
    cbz(rscratch2, SAME_CHARS);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the length zero?
  cbz(cnt, SAME_CHARS);

  bind(SHORT_LOOP);
  load_unsigned_short(tmp1, Address(post(str1, 2)));
  load_unsigned_short(tmp2, Address(post(str2, 2)));
  subw(tmp1, tmp1, tmp2);
  cbnz(tmp1, DONE);
  sub(cnt, cnt, 1);
  cbnz(cnt, SHORT_LOOP);

  // Strings are equal.
  bind(SAME_CHARS);
  mov(result, true);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_equals");
}

// Compare char[] arrays aligned to 4 bytes
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                        Register result, Register tmp1)
{
  Register cnt1 = rscratch1;
  Register cnt2 = rscratch2;
  Register tmp2 = rscratch2;

  Label SAME, DIFFER, NEXT, TAIL03, TAIL01;

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  BLOCK_COMMENT("char_arrays_equals {");

  // different until proven equal
  mov(result, false);

  // same array?
  cmp(ary1, ary2);
  br(Assembler::EQ, SAME);

  // ne if either null
  cbz(ary1, DIFFER);
  cbz(ary2, DIFFER);

  // lengths differ?
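  // (arrays of unequal length can never be equal, so the rest of the
  //  code can assume cnt1 == cnt2)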
  ldrw(cnt1, Address(ary1, length_offset));
  ldrw(cnt2, Address(ary2, length_offset));
  cmp(cnt1, cnt2);
  br(Assembler::NE, DIFFER);

  lea(ary1, Address(ary1, base_offset));
  lea(ary2, Address(ary2, base_offset));

  subs(cnt1, cnt1, 4);
  br(LT, TAIL03);

  BIND(NEXT);
  ldr(tmp1, Address(post(ary1, 8)));
  ldr(tmp2, Address(post(ary2, 8)));
  subs(cnt1, cnt1, 4);
  eor(tmp1, tmp1, tmp2);
  cbnz(tmp1, DIFFER);
  br(GE, NEXT);

  BIND(TAIL03);  // 0-3 chars left, cnt1 = #chars left - 4
  tst(cnt1, 0b10);
  br(EQ, TAIL01);
  ldrw(tmp1, Address(post(ary1, 4)));
  ldrw(tmp2, Address(post(ary2, 4)));
  cmp(tmp1, tmp2);
  br(NE, DIFFER);
  BIND(TAIL01);  // 0-1 chars left
  tst(cnt1, 0b01);
  br(EQ, SAME);
  ldrh(tmp1, ary1);
  ldrh(tmp2, ary2);
  cmp(tmp1, tmp2);
  br(NE, DIFFER);

  BIND(SAME);
  mov(result, true);
  BIND(DIFFER); // result already set

  BLOCK_COMMENT("} char_arrays_equals");
}

// Encode char[] to byte[] in ISO-8859-1.
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register result,
                                      FloatRegister Vtmp1, FloatRegister Vtmp2,
                                      FloatRegister Vtmp3, FloatRegister Vtmp4)
{
  Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1;
  Register tmp1 = rscratch1;

  mov(result, len); // Save initial len

#ifndef BUILTIN_SIM
  subs(len, len, 32);
  br(LT, LOOP_8);

  // The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions
  // to convert chars to bytes. These set the 'QC' bit in the FPSR if
  // any char could not fit in a byte, so clear the FPSR so we can test it.
  clear_fpsr();

  BIND(NEXT_32);
  ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);  // uqxtn  - write bottom half
  uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half
  uqxtn(Vtmp2, T8B, Vtmp3, T8H);
  uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_8);
  st1(Vtmp1, Vtmp2, T16B, post(dst, 32));
  subs(len, len, 32);
  add(src, src, 64);
  br(GE, NEXT_32);

  BIND(LOOP_8);
  adds(len, len, 32-8);
  br(LT, LOOP_1);
  clear_fpsr(); // QC may be set from loop above, clear again
  BIND(NEXT_8);
  ld1(Vtmp1, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_1);
  st1(Vtmp1, T8B, post(dst, 8));
  subs(len, len, 8);
  add(src, src, 16);
  br(GE, NEXT_8);

  BIND(LOOP_1);
  adds(len, len, 8);
  br(LE, DONE);
#else
  cbz(len, DONE);
#endif
  BIND(NEXT_1);
  ldrh(tmp1, Address(post(src, 2)));
  tst(tmp1, 0xff00);
  br(NE, DONE);
  strb(tmp1, Address(post(dst, 1)));
  subs(len, len, 1);
  br(GT, NEXT_1);

  BIND(DONE);
  sub(result, result, len); // Return index where we stopped
}
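
// Semantically, encode_iso_array behaves like the following sketch (using
// HotSpot's jchar/jbyte types; the SIMD fast paths above are just a blocked
// version of this loop):
//
//   int encode_iso_array(jchar *src, jbyte *dst, int len) {
//     int i;
//     for (i = 0; i < len; i++) {
//       jchar c = src[i];
//       if (c & 0xff00) break;   // char does not fit in ISO-8859-1: stop
//       dst[i] = (jbyte)c;
//     }
//     return i;                  // index where we stopped
//   }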