/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "oops/oop.inline.hpp"

#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
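    // A sketch of the page arithmetic done below, with illustrative
    // numbers (not from the source): if branch == 0x10_1234 and
    // target == 0x40_5678, then pc_page == 0x101, adr_page == 0x405,
    // so the ADRP immediate becomes 0x304 pages and offset_lo == 0x678
    // is patched into the following ldr/str or add.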
    offset = target-branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 3 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 2 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str or add instruction. Otherwise we could accidentally end
      // up treating a type 3 relocation as a type 1 or 2 just because it happened
      // to be followed by a random unrelated ldr/str or add instruction.
      //
      // In the case of a type 3 relocation, we know that these are only
      // generated for the safepoint polling page, or for the card table
      // byte map base, so we assert as much, and of course that the
      // offset is 0.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else {
        assert((jbyte *)target ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               target == StubRoutines::crc_table_addr() ||
               (address)target == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
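  // The two sequences this routine expects to find (a sketch; see the
  // patch calls below for the authoritative bit positions):
  //   narrow oop:  movz Rd, #oop<31:16>, lsl 16
  //                movk Rd, #oop<15:0>
  //   wide oop:    movz Rd, #oop<15:0>
  //                movk Rd, #oop<31:16>, lsl 16
  //                movk Rd, #oop<47:32>, lsl 32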
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same, and
      // return target_page + the offset within the page.
      // Otherwise we assume it is a page-aligned relocation and return
      // the target page only. The only cases in which this is generated
      // are the safepoint polling page and the card table byte map base,
      // so we assert as much.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        assert((jbyte *)target_page ==
               ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               (address)target_page == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}


void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  if (clear_pc) {
    str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
  }
}

// Calls to C land
//
// When entering C land, the rfp & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
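// A rough picture of the anchor fields involved (names from
// JavaFrameAnchor; see javaFrameAnchor.hpp):
//   _last_Java_sp -- SP of the last Java frame; zero means "no frame"
//   _last_Java_fp -- FP of that frame; optional, may be zero
//   _last_Java_pc -- PC at which to resume the stack walk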
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct. We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far jump not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
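    // The resulting sequence is, sketched:
    //   adrp tmp, <target page> ; add tmp, tmp, <page offset> ; br tmp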
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    tmp_reg = rscratch2;
  }
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread.
  // Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
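  // For reference, the bias-related mark word layouts are roughly
  // (see markOop.hpp; a sketch only):
  //   biased:   [ JavaThread* | epoch:2 | age:4 | 1 | 01 ]
  //   unbiased: [    header bits        | age:4 | 0 | 01 ]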
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true, true);
  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.

address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
  if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
    address stub = emit_trampoline_stub(start_offset, entry.target());
    if (stub == NULL) {
      return NULL; // CodeCache is full
    }
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (Assembler::reachable_from_branch_at(pc(), entry.target())) {
    bl(entry.target());
  } else {
    bl(pc());
  }
  // just need to return a non-null address
  return pc();
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    return NULL; // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
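  // The stub emitted below has this shape (a sketch of the code that
  // follows):
  //   <stub>:  ldr rscratch1, <data>  ; load target from inline constant
  //            br  rscratch1
  //   <data>:  .8byte dest            ; at NativeCallTrampolineStub::data_offset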
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub;
}

address MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
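  // Arguments are marshalled highest-numbered first so that filling
  // c_rarg3 cannot clobber a register still holding arg_1 or arg_2;
  // the surrounding asserts check exactly that.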
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }


RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  ldr(tmp, ExternalAddress((address) delayed_value_addr));

  if (offset != 0)
    add(tmp, tmp, offset);

  return RegisterOrConstant(tmp);
}


void MacroAssembler::notify(int type) {
  if (type == bytecode_start) {
    // set_last_Java_frame(esp, rfp, (address)NULL);
    Assembler::notify(type);
    // reset_last_Java_frame(true, false);
  }
  else
    Assembler::notify(type);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size() * wordSize;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary
    // see code for instanceKlass::start_of_itable!
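    // round_to aligns scan_temp upwards, e.g. 0x1c -> 0x20 for
    // BytesPerLong == 8 (illustrative values).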
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
  if (itentry_off)
    add(recv_klass, recv_klass, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  ldr(method_result, Address(recv_klass, scan_temp));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result, Address(recv_klass, vtable_offset_in_bytes));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(sub_klass, sc_offset);

  BLOCK_COMMENT("check_klass_subtype_slow_path");

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
  assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)

  // Get super_klass value into r0 (even if it was in r5 or r2).
  RegSet pushed_registers;
  if (!IS_A_TEMP(r2)) pushed_registers += r2;
  if (!IS_A_TEMP(r5)) pushed_registers += r5;

  if (super_klass != r0 || UseCompressedOops) {
    if (!IS_A_TEMP(r0)) pushed_registers += r0;
  }

  push(pushed_registers, sp);

#ifndef PRODUCT
  mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
  Address pst_counter_addr(rscratch2);
  ldr(rscratch1, pst_counter_addr);
  add(rscratch1, rscratch1, 1);
  str(rscratch1, pst_counter_addr);
#endif //PRODUCT

  // We will consult the secondary-super array.
  ldr(r5, secondary_supers_addr);
  // Load the array length.
  ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  add(r5, r5, Array<Klass*>::base_offset_in_bytes());

  cmp(sp, zr); // Clear Z flag; SP is never zero
  // Scan R2 words at [R5] for an occurrence of R0.
  // Set NZ/Z based on last compare.
  repne_scan(r5, r0, r2, rscratch1);

  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
  }
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop_addr");
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
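  // Example (illustrative values): with arg_slot == 2, stackElementSize == 8
  // and extra_slot_offset == 0, this returns an Address equivalent to
  // esp + 2 * 8 + expr_offset_in_bytes(0).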
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr);
}

void MacroAssembler::call_VM_leaf_base1(address entry_point,
                                        int number_of_gp_arguments,
                                        int number_of_fp_arguments,
                                        ret_type type,
                                        Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  // We add 1 to number_of_arguments because the thread in arg0 is
  // not counted
  mov(rscratch1, entry_point);
  blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
  maybe_isb();
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert(arg_0 != c_rarg3, "smashed arg");
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    // NOTE: this is plenty to provoke a segv
    ldr(zr, Address(reg));
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// MacroAssembler protected routines needed to implement
// public methods

void MacroAssembler::mov(Register r, Address dest) {
  code_section()->relocate(pc(), dest.rspec());
  u_int64_t imm64 = (u_int64_t)dest.target();
  movptr(r, imm64);
}

// Move a constant pointer into r. In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
}

// Macro to mov replicated immediate to vector register.
//  Vd will get the following values for different arrangements in T
//   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
//   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
//   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
//   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
//   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
//   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
//   T1D/T2D: invalid
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
  assert(T != T1D && T != T2D, "invalid arrangement");
  if (T == T8B || T == T16B) {
    assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
    movi(Vd, T, imm32 & 0xff, 0);
    return;
  }
  u_int32_t nimm32 = ~imm32;
  if (T == T4H || T == T8H) {
    assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
    imm32 &= 0xffff;
    nimm32 &= 0xffff;
  }
  u_int32_t x = imm32;
  int movi_cnt = 0;
  int movn_cnt = 0;
  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
  x = nimm32;
  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
  if (movn_cnt < movi_cnt) imm32 = nimm32;
  unsigned lsl = 0;
  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
  if (movn_cnt < movi_cnt)
    mvni(Vd, T, imm32 & 0xff, lsl);
  else
    movi(Vd, T, imm32 & 0xff, lsl);
  imm32 >>= 8; lsl += 8;
  while (imm32) {
    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
    if (movn_cnt < movi_cnt)
      bici(Vd, T, imm32 & 0xff, lsl);
    else
      orri(Vd, T, imm32 & 0xff, lsl);
    lsl += 8; imm32 >>= 8;
  }
}

void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
    block_comment(buffer);
  }
#endif
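  // Strategy sketch: split imm64 into four 16-bit halfwords and count
  // the all-zero and all-one ones. Illustrative cases:
  //   0x0000_00ff_0000_0000 -> zero_count == 3, a single MOVZ suffices
  //   0xffff_ffff_1234_ffff -> neg_count == 3, a single MOVN suffices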
  if (operand_valid_for_logical_immediate(false, imm64)) {
    orr(dst, zr, imm64);
  } else {
    // we can use a combination of MOVZ or MOVN with
    // MOVK to build up the constant
    u_int64_t imm_h[4];
    int zero_count = 0;
    int neg_count = 0;
    int i;
    for (i = 0; i < 4; i++) {
      imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
      if (imm_h[i] == 0) {
        zero_count++;
      } else if (imm_h[i] == 0xffffL) {
        neg_count++;
      }
    }
    if (zero_count == 4) {
      // one MOVZ will do
      movz(dst, 0);
    } else if (neg_count == 4) {
      // one MOVN will do
      movn(dst, 0);
    } else if (zero_count == 3) {
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          break;
        }
      }
    } else if (neg_count == 3) {
      // one MOVN will do
      for (int i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          break;
        }
      }
    } else if (zero_count == 2) {
      // one MOVZ and one MOVK will do
      for (i = 0; i < 3; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 2) {
      // one MOVN and one MOVK will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (zero_count == 1) {
      // one MOVZ and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0L) {
          movz(dst, (u_int32_t)imm_h[i], (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0x0L) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else if (neg_count == 1) {
      // one MOVN and two MOVKs will do
      for (i = 0; i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
          i++;
          break;
        }
      }
      for (;i < 4; i++) {
        if (imm_h[i] != 0xffffL) {
          movk(dst, (u_int32_t)imm_h[i], (i << 4));
        }
      }
    } else {
      // use a MOVZ and 3 MOVKs (makes it easier to debug)
      movz(dst, (u_int32_t)imm_h[0], 0);
      for (i = 1; i < 4; i++) {
        movk(dst, (u_int32_t)imm_h[i], (i << 4));
      }
    }
  }
}

void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(true, imm32)) {
    orrw(dst, zr, imm32);
  } else {
    // we can use MOVZ, MOVN or two calls to MOVK to build up the
    // constant
    u_int32_t imm_h[2];
    imm_h[0] = imm32 & 0xffff;
    imm_h[1] = ((imm32 >> 16) & 0xffff);
    if (imm_h[0] == 0) {
      movzw(dst, imm_h[1], 16);
    } else if (imm_h[0] == 0xffff) {
      movnw(dst, imm_h[1] ^ 0xffff, 16);
    } else if (imm_h[1] == 0) {
      movzw(dst, imm_h[0], 0);
    } else if (imm_h[1] == 0xffff) {
      movnw(dst, imm_h[0] ^ 0xffff, 0);
    } else {
      // use a MOVZ and MOVK (makes it easier to debug)
      movzw(dst, imm_h[0], 0);
      movkw(dst, imm_h[1], 16);
    }
  }
}
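// Illustrative cases for mov_immediate32: 0x12340000 has a zero low
// halfword, so a single "movzw dst, 0x1234, lsl 16" is emitted;
// 0xffff5678 has an all-ones high halfword and goes the movnw route.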
// Form an address from base + offset in Rd.  Rd may or may
// not actually be used: you must use the Address that is returned.
// It is up to you to ensure that the shift provided matches the size
// of your data.
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
  if (Address::offset_ok_for_immed(byte_offset, shift))
    // It fits; no need for any heroics
    return Address(base, byte_offset);

  // Don't do anything clever with negative or misaligned offsets
  unsigned mask = (1 << shift) - 1;
  if (byte_offset < 0 || byte_offset & mask) {
    mov(Rd, byte_offset);
    add(Rd, base, Rd);
    return Address(Rd);
  }

  // See if we can do this with two 12-bit offsets
  {
    unsigned long word_offset = byte_offset >> shift;
    unsigned long masked_offset = word_offset & 0xfff000;
    if (Address::offset_ok_for_immed(word_offset - masked_offset)
        && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
      add(Rd, base, masked_offset << shift);
      word_offset -= masked_offset;
      return Address(Rd, word_offset << shift);
    }
  }

  // Do it the hard way
  mov(Rd, byte_offset);
  add(Rd, base, Rd);
  return Address(Rd);
}

void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) {
  Label retry_load;
  bind(retry_load);
  // flush and load exclusive from the memory location
  ldxrw(tmp, counter_addr);
  addw(tmp, tmp, 1);
  // if we store+flush with no intervening write tmp will be zero
  stxrw(tmp, tmp, counter_addr);
  cbnzw(tmp, retry_load);
}


int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java idiv and irem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivl_offset = offset();
  if (! want_remainder) {
    sdivw(result, ra, rb);
  } else {
    sdivw(scratch, ra, rb);
    Assembler::msubw(result, scratch, rb, ra);
  }

  return idivl_offset;
}

int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
                                    bool want_remainder, Register scratch)
{
  // Full implementation of Java ldiv and lrem.  The function
  // returns the (pc) offset of the div instruction - may be needed
  // for implicit exceptions.
  //
  // constraint : ra/rb =/= scratch
  //         normal case
  //
  // input : ra: dividend
  //         rb: divisor
  //
  // result: either
  //         quotient  (= ra idiv rb)
  //         remainder (= ra irem rb)

  assert(ra != scratch && rb != scratch, "reg cannot be scratch");

  int idivq_offset = offset();
  if (!
want_remainder) { 1707 sdiv(result, ra, rb); 1708 } else { 1709 sdiv(scratch, ra, rb); 1710 Assembler::msub(result, scratch, rb, ra); 1711 } 1712 1713 return idivq_offset; 1714 } 1715 1716 // MacroAssembler routines found actually to be needed 1717 1718 void MacroAssembler::push(Register src) 1719 { 1720 str(src, Address(pre(esp, -1 * wordSize))); 1721 } 1722 1723 void MacroAssembler::pop(Register dst) 1724 { 1725 ldr(dst, Address(post(esp, 1 * wordSize))); 1726 } 1727 1728 // Note: load_unsigned_short used to be called load_unsigned_word. 1729 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1730 int off = offset(); 1731 ldrh(dst, src); 1732 return off; 1733 } 1734 1735 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1736 int off = offset(); 1737 ldrb(dst, src); 1738 return off; 1739 } 1740 1741 int MacroAssembler::load_signed_short(Register dst, Address src) { 1742 int off = offset(); 1743 ldrsh(dst, src); 1744 return off; 1745 } 1746 1747 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1748 int off = offset(); 1749 ldrsb(dst, src); 1750 return off; 1751 } 1752 1753 int MacroAssembler::load_signed_short32(Register dst, Address src) { 1754 int off = offset(); 1755 ldrshw(dst, src); 1756 return off; 1757 } 1758 1759 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 1760 int off = offset(); 1761 ldrsbw(dst, src); 1762 return off; 1763 } 1764 1765 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1766 switch (size_in_bytes) { 1767 case 8: ldr(dst, src); break; 1768 case 4: ldrw(dst, src); break; 1769 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1770 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1771 default: ShouldNotReachHere(); 1772 } 1773 } 1774 1775 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1776 switch (size_in_bytes) { 1777 case 8: str(src, dst); break; 1778 case 4: strw(src, dst); break; 1779 case 2: strh(src, dst); break; 1780 case 1: strb(src, dst); break; 1781 default: ShouldNotReachHere(); 1782 } 1783 } 1784 1785 void MacroAssembler::decrementw(Register reg, int value) 1786 { 1787 if (value < 0) { incrementw(reg, -value); return; } 1788 if (value == 0) { return; } 1789 if (value < (1 << 12)) { subw(reg, reg, value); return; } 1790 /* else */ { 1791 guarantee(reg != rscratch2, "invalid dst for register decrement"); 1792 movw(rscratch2, (unsigned)value); 1793 subw(reg, reg, rscratch2); 1794 } 1795 } 1796 1797 void MacroAssembler::decrement(Register reg, int value) 1798 { 1799 if (value < 0) { increment(reg, -value); return; } 1800 if (value == 0) { return; } 1801 if (value < (1 << 12)) { sub(reg, reg, value); return; } 1802 /* else */ { 1803 assert(reg != rscratch2, "invalid dst for register decrement"); 1804 mov(rscratch2, (unsigned long)value); 1805 sub(reg, reg, rscratch2); 1806 } 1807 } 1808 1809 void MacroAssembler::decrementw(Address dst, int value) 1810 { 1811 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 1812 ldrw(rscratch1, dst); 1813 decrementw(rscratch1, value); 1814 strw(rscratch1, dst); 1815 } 1816 1817 void MacroAssembler::decrement(Address dst, int value) 1818 { 1819 assert(!dst.uses(rscratch1), "invalid address for decrement"); 1820 ldr(rscratch1, dst); 1821 decrement(rscratch1, value); 1822 str(rscratch1, dst); 1823 } 1824 1825 void MacroAssembler::incrementw(Register reg, int value) 1826 { 1827 if (value < 0) { decrementw(reg, -value); return; } 1828 if (value == 0) { return; } 1829 if (value < (1 << 12)) { addw(reg, reg, value); return; } 1830 /* else */ { 1831 assert(reg != rscratch2, "invalid dst for register increment"); 1832 movw(rscratch2, (unsigned)value); 1833 addw(reg, reg, rscratch2); 1834 } 1835 } 1836 1837 void MacroAssembler::increment(Register reg, int value) 1838 { 1839 if (value < 0) { decrement(reg, -value); return; } 1840 if (value == 0) { return; } 1841 if (value < (1 << 12)) { add(reg, reg, value); return; } 1842 /* else */ { 1843 assert(reg != rscratch2, "invalid dst for register increment"); 1844 movw(rscratch2, (unsigned)value); 1845 add(reg, reg, rscratch2); 1846 } 1847 } 1848 1849 void MacroAssembler::incrementw(Address dst, int value) 1850 { 1851 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1852 ldrw(rscratch1, dst); 1853 incrementw(rscratch1, value); 1854 strw(rscratch1, dst); 1855 } 1856 1857 void MacroAssembler::increment(Address dst, int value) 1858 { 1859 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 1860 ldr(rscratch1, dst); 1861 increment(rscratch1, value); 1862 str(rscratch1, dst); 1863 } 1864 1865 1866 void MacroAssembler::pusha() { 1867 push(0x7fffffff, sp); 1868 } 1869 1870 void MacroAssembler::popa() { 1871 pop(0x7fffffff, sp); 1872 } 1873 1874 // Push lots of registers in the bit set supplied. Don't push sp. 
// Return the number of words pushed
int MacroAssembler::push(unsigned int bitset, Register stack) {
  int words_pushed = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;  // Only push an even number of regs

  if (count) {
    stp(as_Register(regs[0]), as_Register(regs[1]),
        Address(pre(stack, -count * wordSize)));
    words_pushed += 2;
  }
  for (int i = 2; i < count; i += 2) {
    stp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_pushed += 2;
  }

  assert(words_pushed == count, "oops, pushed != count");

  return count;
}

int MacroAssembler::pop(unsigned int bitset, Register stack) {
  int words_popped = 0;

  // Scan bitset to accumulate register pairs
  unsigned char regs[32];
  int count = 0;
  for (int reg = 0; reg <= 30; reg++) {
    if (1 & bitset)
      regs[count++] = reg;
    bitset >>= 1;
  }
  regs[count++] = zr->encoding_nocheck();
  count &= ~1;

  for (int i = 2; i < count; i += 2) {
    ldp(as_Register(regs[i]), as_Register(regs[i+1]),
        Address(stack, i * wordSize));
    words_popped += 2;
  }
  if (count) {
    ldp(as_Register(regs[0]), as_Register(regs[1]),
        Address(post(stack, count * wordSize)));
    words_popped += 2;
  }

  assert(words_popped == count, "oops, popped != count");

  return count;
}
#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
#if 0
  assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
    cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
    br(Assembler::EQ, ok);
    stop(msg);
    bind(ok);
    pop(1 << rscratch1->encoding(), sp);
  }
#endif
}
#endif

void MacroAssembler::stop(const char* msg) {
  address ip = pc();
  pusha();
  mov(c_rarg0, (address)msg);
  mov(c_rarg1, (address)ip);
  mov(c_rarg2, sp);
  mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
  // call(c_rarg3);
  blrt(c_rarg3, 3, 0, 1);
  hlt(0);
}

// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                                           add_sub_imm_insn insn1,
                                           add_sub_reg_insn insn2) {
  assert(Rd != zr, "Rd = zr and not setting flags?");
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    if (uabs(imm) < (1 << 24)) {
      (this->*insn1)(Rd, Rn, imm & -(1 << 12));
      (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
    } else {
      assert_different_registers(Rd, Rn);
      mov(Rd, (uint64_t)imm);
      (this->*insn2)(Rd, Rn, Rd, LSL, 0);
    }
  }
}

// Separate version which sets the flags. Optimisations are more restricted
// because we must set the flags correctly.
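// For example (illustrative values only): the non-flag-setting wrapper
// above may split an out-of-range "add Rd, Rn, #0x123456" into
//   add Rd, Rn, #0x123000
//   add Rd, Rd, #0x456
// but doing the same with adds/subs would leave the flags describing
// only the second addition, so here the whole constant is materialised
// in Rd and the register form is used instead.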
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                                             add_sub_imm_insn insn1,
                                             add_sub_reg_insn insn2) {
  if (operand_valid_for_add_sub_immediate((int)imm)) {
    (this->*insn1)(Rd, Rn, imm);
  } else {
    assert_different_registers(Rd, Rn);
    assert(Rd != zr, "overflow in immediate operand");
    mov(Rd, (uint64_t)imm);
    (this->*insn2)(Rd, Rn, Rd, LSL, 0);
  }
}


void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    add(Rd, Rn, increment.as_register());
  } else {
    add(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
  if (increment.is_register()) {
    addw(Rd, Rn, increment.as_register());
  } else {
    addw(Rd, Rn, increment.as_constant());
  }
}

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, Universe::narrow_ptrs_base());
    } else {
      lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b. the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// register+offset Address.

void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                                Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  // tmp returns 0/1 for success/failure
  Label retry_load, nope;

  bind(retry_load);
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  ldaxr(tmp, addr);
  cmp(tmp, oldv);
  br(Assembler::NE, nope);
  // if we store+flush with no intervening write tmp will be zero
  stlxr(tmp, newv, addr);
  cbzw(tmp, succeed);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
  b(retry_load);
  // if the memory word differs we return it in oldv and signal a fail
  bind(nope);
  membar(AnyAny);
  mov(oldv, tmp);
  if (fail)
    b(*fail);
}

void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                              Label &succeed, Label *fail) {
  // oldv holds comparison value
  // newv holds value to write in exchange
  // addr identifies memory word to compare against/update
  // tmp returns 0/1 for success/failure
  Label retry_load, nope;

  bind(retry_load);
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  ldaxrw(tmp, addr);
  cmp(tmp, oldv);
  br(Assembler::NE, nope);
  // if we store+flush with no intervening write tmp will be zero
  stlxrw(tmp, newv, addr);
  cbzw(tmp, succeed);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
  b(retry_load);
  // if the memory word differs we return it in oldv and signal a fail
  bind(nope);
  membar(AnyAny);
  mov(oldv, tmp);
  if (fail)
    b(*fail);
}

static bool different(Register a, RegisterOrConstant b, Register c) {
  if (b.is_constant())
    return a != c;
  else
    return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(LDXR, OP, STXR)                                       \
void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  OP(rscratch1, result, incr);                                          \
  STXR(rscratch1, rscratch1, addr);                                     \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_OP(ldxr, add, stxr)
ATOMIC_OP(ldxrw, addw, stxrw)

#undef ATOMIC_OP

#define ATOMIC_XCHG(OP, LDXR, STXR)                                     \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
                                                                        \
  Label retry_load;                                                     \
  bind(retry_load);                                                     \
  LDXR(result, addr);                                                   \
  STXR(rscratch1, newv, addr);                                          \
  cbnzw(rscratch1, retry_load);                                         \
  if (prev->is_valid() && prev != result)                               \
    mov(prev, result);                                                  \
}

ATOMIC_XCHG(xchg, ldxr, stxr)
ATOMIC_XCHG(xchgw, ldxrw, stxrw)

#undef ATOMIC_XCHG

void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
    thread = rthread;
  }
  assert(t1->is_valid(), "need temp reg");

  ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    add(t1, t1, var_size_in_bytes);
  } else {
    add(t1, t1, con_size_in_bytes);
  }
  str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
{
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr(" pc = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr(" r0 = 0x%016lx", regs[0]);
      tty->print_cr(" r1 = 0x%016lx", regs[1]);
      tty->print_cr(" r2 = 0x%016lx", regs[2]);
      tty->print_cr(" r3 = 0x%016lx", regs[3]);
      tty->print_cr(" r4 = 0x%016lx", regs[4]);
      tty->print_cr(" r5 = 0x%016lx", regs[5]);
      tty->print_cr(" r6 = 0x%016lx", regs[6]);
      tty->print_cr(" r7 = 0x%016lx", regs[7]);
      tty->print_cr(" r8 = 0x%016lx", regs[8]);
      tty->print_cr(" r9 = 0x%016lx", regs[9]);
      tty->print_cr("r10 = 0x%016lx", regs[10]);
      tty->print_cr("r11 = 0x%016lx", regs[11]);
      tty->print_cr("r12 = 0x%016lx", regs[12]);
      tty->print_cr("r13 = 0x%016lx", regs[13]);
      tty->print_cr("r14 = 0x%016lx", regs[14]);
      tty->print_cr("r15 = 0x%016lx", regs[15]);
      tty->print_cr("r16 = 0x%016lx", regs[16]);
      tty->print_cr("r17 = 0x%016lx", regs[17]);
      tty->print_cr("r18 = 0x%016lx", regs[18]);
      tty->print_cr("r19 = 0x%016lx", regs[19]);
      tty->print_cr("r20 = 0x%016lx", regs[20]);
      tty->print_cr("r21 = 0x%016lx", regs[21]);
      tty->print_cr("r22 = 0x%016lx", regs[22]);
      tty->print_cr("r23 = 0x%016lx", regs[23]);
      tty->print_cr("r24 = 0x%016lx", regs[24]);
      tty->print_cr("r25 = 0x%016lx", regs[25]);
      tty->print_cr("r26 = 0x%016lx", regs[26]);
      tty->print_cr("r27 = 0x%016lx", regs[27]);
      tty->print_cr("r28 = 0x%016lx", regs[28]);
      tty->print_cr("r30 = 0x%016lx", regs[30]);
      tty->print_cr("r31 = 0x%016lx", regs[31]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  }
}

#ifdef BUILTIN_SIM
// routine to generate an x86 prolog for a stub function which
// bootstraps into the generated ARM code which directly follows the
// stub
//
// the argument encodes the number of general and fp registers
// passed by the caller and the calling convention (currently just
// the number of general registers and assumes C argument passing)

extern "C" {
  int aarch64_stub_prolog_size();
  void aarch64_stub_prolog();
  void aarch64_prolog();
}

void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type,
                                   address *prolog_ptr)
{
  int calltype = (((ret_type & 0x3) << 8) |
                  ((fp_arg_count & 0xf) << 4) |
                  (gp_arg_count & 0xf));

  // the addresses for the x86 to ARM entry code we need to use
  address start = pc();
  // printf("start = %lx\n", start);
  int byteCount = aarch64_stub_prolog_size();
  // printf("byteCount = %x\n", byteCount);
  int instructionCount = (byteCount + 3) / 4;
  // printf("instructionCount = %x\n", instructionCount);
  for (int i = 0; i < instructionCount; i++) {
    nop();
  }

  memcpy(start, (void*)aarch64_stub_prolog, byteCount);

  // write the address of the setup routine and the call format at the
  // end of the copied code
  u_int64_t *patch_end = (u_int64_t *)(start + byteCount);
  if (prolog_ptr)
    patch_end[-2] = (u_int64_t)prolog_ptr;
  patch_end[-1] = calltype;
}
#endif

void MacroAssembler::push_CPU_state() {
  push(0x3fffffff, sp);         // integer registers except lr & sp

  for (int i = 30; i >= 0; i -= 2)
    stpd(as_FloatRegister(i), as_FloatRegister(i+1),
         Address(pre(sp, -2 * wordSize)));
}

void MacroAssembler::pop_CPU_state() {
  for (int i = 0; i < 32; i += 2)
    ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
         Address(post(sp, 2 * wordSize)));

  pop(0x3fffffff, sp);          // integer registers except lr & sp
}

/**
 * Helpers for multiply_to_len().
 *
 * In effect: (final_dest_hi:dest_lo) = (dest_hi:dest_lo) + src1 + src2,
 * with the carries propagated by the adc instructions.
 */
void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  adds(dest_lo, dest_lo, src1);
  adc(dest_hi, dest_hi, zr);
  adds(dest_lo, dest_lo, src2);
  adc(final_dest_hi, dest_hi, zr);
}

// Generate an address from (r + r1 extend offset).  "size" is the
// size of the operand.  The result may be in rscratch2.
Address MacroAssembler::offsetted_address(Register r, Register r1,
                                          Address::extend ext, int offset, int size) {
  if (offset || (ext.shift() % size != 0)) {
    lea(rscratch2, Address(r, r1, ext));
    return Address(rscratch2, offset);
  } else {
    return Address(r, r1, ext);
  }
}

Address MacroAssembler::spill_address(int size, int offset, Register tmp)
{
  assert(offset >= 0, "spill to negative address?");
  // Offset reachable ?
  //   Not aligned - 9 bits signed offset
  //   Aligned - 12 bits unsigned offset shifted
  Register base = sp;
  if ((offset & (size-1)) && offset >= (1<<8)) {
    add(tmp, base, offset & ((1<<12)-1));
    base = tmp;
    offset &= -1<<12;
  }

  if (offset >= (1<<12) * size) {
    add(tmp, base, offset & (((1<<12)-1)<<12));
    base = tmp;
    offset &= ~(((1<<12)-1)<<12);
  }

  return Address(base, offset);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_one_x);

  lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
  ldr(x_xstart, Address(rscratch1));
  ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_first_loop_exit);
  subsw(idx, idx, 1);
  br(Assembler::MI, L_one_y);
  lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
  ldr(y_idx, Address(rscratch1));
  ror(y_idx, y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);

  // AArch64 has a multiply-accumulate instruction that we can't use
  // here because it has no way to process carries, so we have to use
  // separate add and adc instructions.  Bah.
  umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
  mul(product, x_xstart, y_idx);
  adds(product, product, carry);
  adc(carry, rscratch1, zr);   // x_xstart * y_idx + carry -> carry:product

  subw(kdx, kdx, 2);
  ror(product, product, 32); // back to big-endian
  str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));

  b(L_first_loop);

  bind(L_one_y);
  ldrw(y_idx, Address(y, 0));
  b(L_multiply);

  bind(L_one_x);
  ldrw(x_xstart, Address(x, 0));
  b(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
2409 * 2410 */ 2411 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 2412 Register carry, Register carry2, 2413 Register idx, Register jdx, 2414 Register yz_idx1, Register yz_idx2, 2415 Register tmp, Register tmp3, Register tmp4, 2416 Register tmp6, Register product_hi) { 2417 2418 // jlong carry, x[], y[], z[]; 2419 // int kdx = ystart+1; 2420 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 2421 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 2422 // jlong carry2 = (jlong)(tmp3 >>> 64); 2423 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 2424 // carry = (jlong)(tmp4 >>> 64); 2425 // z[kdx+idx+1] = (jlong)tmp3; 2426 // z[kdx+idx] = (jlong)tmp4; 2427 // } 2428 // idx += 2; 2429 // if (idx > 0) { 2430 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 2431 // z[kdx+idx] = (jlong)yz_idx1; 2432 // carry = (jlong)(yz_idx1 >>> 64); 2433 // } 2434 // 2435 2436 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 2437 2438 lsrw(jdx, idx, 2); 2439 2440 bind(L_third_loop); 2441 2442 subsw(jdx, jdx, 1); 2443 br(Assembler::MI, L_third_loop_exit); 2444 subw(idx, idx, 4); 2445 2446 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2447 2448 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 2449 2450 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2451 2452 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 2453 ror(yz_idx2, yz_idx2, 32); 2454 2455 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 2456 2457 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2458 umulh(tmp4, product_hi, yz_idx1); 2459 2460 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 2461 ror(rscratch2, rscratch2, 32); 2462 2463 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 2464 umulh(carry2, product_hi, yz_idx2); 2465 2466 // propagate sum of both multiplications into carry:tmp4:tmp3 2467 adds(tmp3, tmp3, carry); 2468 adc(tmp4, tmp4, zr); 2469 adds(tmp3, tmp3, rscratch1); 2470 adcs(tmp4, tmp4, tmp); 2471 adc(carry, carry2, zr); 2472 adds(tmp4, tmp4, rscratch2); 2473 adc(carry, carry, zr); 2474 2475 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 2476 ror(tmp4, tmp4, 32); 2477 stp(tmp4, tmp3, Address(tmp6, 0)); 2478 2479 b(L_third_loop); 2480 bind (L_third_loop_exit); 2481 2482 andw (idx, idx, 0x3); 2483 cbz(idx, L_post_third_loop_done); 2484 2485 Label L_check_1; 2486 subsw(idx, idx, 2); 2487 br(Assembler::MI, L_check_1); 2488 2489 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2490 ldr(yz_idx1, Address(rscratch1, 0)); 2491 ror(yz_idx1, yz_idx1, 32); 2492 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 2493 umulh(tmp4, product_hi, yz_idx1); 2494 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2495 ldr(yz_idx2, Address(rscratch1, 0)); 2496 ror(yz_idx2, yz_idx2, 32); 2497 2498 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 2499 2500 ror(tmp3, tmp3, 32); 2501 str(tmp3, Address(rscratch1, 0)); 2502 2503 bind (L_check_1); 2504 2505 andw (idx, idx, 0x1); 2506 subsw(idx, idx, 1); 2507 br(Assembler::MI, L_post_third_loop_done); 2508 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 2509 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 2510 umulh(carry2, tmp4, product_hi); 2511 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2512 2513 add2_with_carry(carry2, tmp3, tmp4, carry); 2514 2515 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 2516 
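// The extr below computes, in effect, carry = (carry2 << 32) | (tmp3 >>> 32):
// carry2:tmp3 is treated as one 128-bit value and the 64 bits starting
// at bit 32 are extracted, rebuilding the running carry from the two
// words produced by add2_with_carry above.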
  extr(carry, carry2, tmp3, 32);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * r0: x
 * r1: xlen
 * r2: y
 * r3: ylen
 * r4: z
 * r5: zlen
 * r10: tmp1
 * r11: tmp2
 * r12: tmp3
 * r13: tmp4
 * r14: tmp5
 * r15: tmp6
 * r16: tmp7
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
                                     Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6, Register product_hi) {

  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movw(idx, ylen);      // idx = ylen;
  movw(kdx, zlen);      // kdx = xlen+ylen;
  mov(carry, zr);       // carry = 0;

  Label L_done;

  movw(xstart, xlen);
  subsw(xstart, xstart, 1);
  br(Assembler::MI, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  cbzw(kdx, L_second_loop);

  Label L_carry;
  subw(kdx, kdx, 1);
  cbzw(kdx, L_carry);

  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
  lsr(carry, carry, 32);
  subw(kdx, kdx, 1);

  bind(L_carry);
  strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi

  const Register jdx = tmp1;

  bind(L_second_loop);
  mov(carry, zr); // carry = 0;
  movw(jdx, ylen); // j = ystart+1

  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_done);

  str(z, Address(pre(sp, -4 * wordSize)));

  Label L_last_x;
  lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
  subsw(xstart, xstart, 1); // i = xstart-1;
  br(Assembler::MI, L_last_x);

  lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
  ldr(product_hi, Address(rscratch1));
  ror(product_hi, product_hi, 32); // convert big-endian to little-endian

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  str(ylen, Address(sp, wordSize));
  stp(x, xstart, Address(sp, 2 * wordSize));
  multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
                          tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
  ldp(z, ylen, Address(post(sp, 2 * wordSize)));
  ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen

  addw(tmp3, xlen, 1);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  subsw(tmp3, tmp3, 1);
  br(Assembler::MI, L_done);

  lsr(carry, carry, 32);
  strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
  b(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  ldrw(product_hi, Address(x, 0));
  b(L_third_loop_prologue);

  bind(L_done);
}

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 *   val = crc_table[(val ^ crc) & 0xFF];
 *   crc = val ^ (crc >> 8);
 *
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  eor(val, val, crc);
  andr(val, val, 0xff);
  ldrw(val, Address(table, val, Address::lsl(2)));
  eor(crc, val, crc, Assembler::LSR, 8);
}

/**
 * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]v         Register containing the 32-bit value to fold into the CRC.
 * @param [in]table0    Register containing table 0 of crc constants.
 * @param [in]table1    Register containing table 1 of crc constants.
 * @param [in]table2    Register containing table 2 of crc constants.
 * @param [in]table3    Register containing table 3 of crc constants.
 *
 * uint32_t crc;
 *   v = crc ^ v
 *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
 *
 */
void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
        Register table0, Register table1, Register table2, Register table3,
        bool upper) {
  eor(v, crc, v, upper ?
LSR:LSL, upper ? 32:0); 2695 uxtb(tmp, v); 2696 ldrw(crc, Address(table3, tmp, Address::lsl(2))); 2697 ubfx(tmp, v, 8, 8); 2698 ldrw(tmp, Address(table2, tmp, Address::lsl(2))); 2699 eor(crc, crc, tmp); 2700 ubfx(tmp, v, 16, 8); 2701 ldrw(tmp, Address(table1, tmp, Address::lsl(2))); 2702 eor(crc, crc, tmp); 2703 ubfx(tmp, v, 24, 8); 2704 ldrw(tmp, Address(table0, tmp, Address::lsl(2))); 2705 eor(crc, crc, tmp); 2706 } 2707 2708 /** 2709 * @param crc register containing existing CRC (32-bit) 2710 * @param buf register pointing to input byte buffer (byte*) 2711 * @param len register containing number of bytes 2712 * @param table register that will contain address of CRC table 2713 * @param tmp scratch register 2714 */ 2715 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 2716 Register table0, Register table1, Register table2, Register table3, 2717 Register tmp, Register tmp2, Register tmp3) { 2718 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 2719 unsigned long offset; 2720 2721 ornw(crc, zr, crc); 2722 2723 if (UseCRC32) { 2724 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2725 2726 subs(len, len, 64); 2727 br(Assembler::GE, CRC_by64_loop); 2728 adds(len, len, 64-4); 2729 br(Assembler::GE, CRC_by4_loop); 2730 adds(len, len, 4); 2731 br(Assembler::GT, CRC_by1_loop); 2732 b(L_exit); 2733 2734 BIND(CRC_by4_loop); 2735 ldrw(tmp, Address(post(buf, 4))); 2736 subs(len, len, 4); 2737 crc32w(crc, crc, tmp); 2738 br(Assembler::GE, CRC_by4_loop); 2739 adds(len, len, 4); 2740 br(Assembler::LE, L_exit); 2741 BIND(CRC_by1_loop); 2742 ldrb(tmp, Address(post(buf, 1))); 2743 subs(len, len, 1); 2744 crc32b(crc, crc, tmp); 2745 br(Assembler::GT, CRC_by1_loop); 2746 b(L_exit); 2747 2748 align(CodeEntryAlignment); 2749 BIND(CRC_by64_loop); 2750 subs(len, len, 64); 2751 ldp(tmp, tmp3, Address(post(buf, 16))); 2752 crc32x(crc, crc, tmp); 2753 crc32x(crc, crc, tmp3); 2754 ldp(tmp, tmp3, Address(post(buf, 16))); 2755 crc32x(crc, crc, tmp); 2756 crc32x(crc, crc, tmp3); 2757 ldp(tmp, tmp3, Address(post(buf, 16))); 2758 crc32x(crc, crc, tmp); 2759 crc32x(crc, crc, tmp3); 2760 ldp(tmp, tmp3, Address(post(buf, 16))); 2761 crc32x(crc, crc, tmp); 2762 crc32x(crc, crc, tmp3); 2763 br(Assembler::GE, CRC_by64_loop); 2764 adds(len, len, 64-4); 2765 br(Assembler::GE, CRC_by4_loop); 2766 adds(len, len, 4); 2767 br(Assembler::GT, CRC_by1_loop); 2768 BIND(L_exit); 2769 ornw(crc, zr, crc); 2770 return; 2771 } 2772 2773 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 2774 if (offset) add(table0, table0, offset); 2775 add(table1, table0, 1*256*sizeof(juint)); 2776 add(table2, table0, 2*256*sizeof(juint)); 2777 add(table3, table0, 3*256*sizeof(juint)); 2778 2779 if (UseNeon) { 2780 cmp(len, 64); 2781 br(Assembler::LT, L_by16); 2782 eor(v16, T16B, v16, v16); 2783 2784 Label L_fold; 2785 2786 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 2787 2788 ld1(v0, v1, T2D, post(buf, 32)); 2789 ld1r(v4, T2D, post(tmp, 8)); 2790 ld1r(v5, T2D, post(tmp, 8)); 2791 ld1r(v6, T2D, post(tmp, 8)); 2792 ld1r(v7, T2D, post(tmp, 8)); 2793 mov(v16, T4S, 0, crc); 2794 2795 eor(v0, T16B, v0, v16); 2796 sub(len, len, 64); 2797 2798 BIND(L_fold); 2799 pmull(v22, T8H, v0, v5, T8B); 2800 pmull(v20, T8H, v0, v7, T8B); 2801 pmull(v23, T8H, v0, v4, T8B); 2802 pmull(v21, T8H, v0, v6, T8B); 2803 2804 pmull2(v18, T8H, v0, v5, T16B); 2805 pmull2(v16, T8H, v0, v7, T16B); 2806 pmull2(v19, T8H, v0, v4, T16B); 2807 pmull2(v17, T8H, v0, v6, T16B); 2808 2809 uzp1(v24, v20, 
v22, T8H); 2810 uzp2(v25, v20, v22, T8H); 2811 eor(v20, T16B, v24, v25); 2812 2813 uzp1(v26, v16, v18, T8H); 2814 uzp2(v27, v16, v18, T8H); 2815 eor(v16, T16B, v26, v27); 2816 2817 ushll2(v22, T4S, v20, T8H, 8); 2818 ushll(v20, T4S, v20, T4H, 8); 2819 2820 ushll2(v18, T4S, v16, T8H, 8); 2821 ushll(v16, T4S, v16, T4H, 8); 2822 2823 eor(v22, T16B, v23, v22); 2824 eor(v18, T16B, v19, v18); 2825 eor(v20, T16B, v21, v20); 2826 eor(v16, T16B, v17, v16); 2827 2828 uzp1(v17, v16, v20, T2D); 2829 uzp2(v21, v16, v20, T2D); 2830 eor(v17, T16B, v17, v21); 2831 2832 ushll2(v20, T2D, v17, T4S, 16); 2833 ushll(v16, T2D, v17, T2S, 16); 2834 2835 eor(v20, T16B, v20, v22); 2836 eor(v16, T16B, v16, v18); 2837 2838 uzp1(v17, v20, v16, T2D); 2839 uzp2(v21, v20, v16, T2D); 2840 eor(v28, T16B, v17, v21); 2841 2842 pmull(v22, T8H, v1, v5, T8B); 2843 pmull(v20, T8H, v1, v7, T8B); 2844 pmull(v23, T8H, v1, v4, T8B); 2845 pmull(v21, T8H, v1, v6, T8B); 2846 2847 pmull2(v18, T8H, v1, v5, T16B); 2848 pmull2(v16, T8H, v1, v7, T16B); 2849 pmull2(v19, T8H, v1, v4, T16B); 2850 pmull2(v17, T8H, v1, v6, T16B); 2851 2852 ld1(v0, v1, T2D, post(buf, 32)); 2853 2854 uzp1(v24, v20, v22, T8H); 2855 uzp2(v25, v20, v22, T8H); 2856 eor(v20, T16B, v24, v25); 2857 2858 uzp1(v26, v16, v18, T8H); 2859 uzp2(v27, v16, v18, T8H); 2860 eor(v16, T16B, v26, v27); 2861 2862 ushll2(v22, T4S, v20, T8H, 8); 2863 ushll(v20, T4S, v20, T4H, 8); 2864 2865 ushll2(v18, T4S, v16, T8H, 8); 2866 ushll(v16, T4S, v16, T4H, 8); 2867 2868 eor(v22, T16B, v23, v22); 2869 eor(v18, T16B, v19, v18); 2870 eor(v20, T16B, v21, v20); 2871 eor(v16, T16B, v17, v16); 2872 2873 uzp1(v17, v16, v20, T2D); 2874 uzp2(v21, v16, v20, T2D); 2875 eor(v16, T16B, v17, v21); 2876 2877 ushll2(v20, T2D, v16, T4S, 16); 2878 ushll(v16, T2D, v16, T2S, 16); 2879 2880 eor(v20, T16B, v22, v20); 2881 eor(v16, T16B, v16, v18); 2882 2883 uzp1(v17, v20, v16, T2D); 2884 uzp2(v21, v20, v16, T2D); 2885 eor(v20, T16B, v17, v21); 2886 2887 shl(v16, T2D, v28, 1); 2888 shl(v17, T2D, v20, 1); 2889 2890 eor(v0, T16B, v0, v16); 2891 eor(v1, T16B, v1, v17); 2892 2893 subs(len, len, 32); 2894 br(Assembler::GE, L_fold); 2895 2896 mov(crc, 0); 2897 mov(tmp, v0, T1D, 0); 2898 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2899 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2900 mov(tmp, v0, T1D, 1); 2901 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2902 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2903 mov(tmp, v1, T1D, 0); 2904 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2905 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2906 mov(tmp, v1, T1D, 1); 2907 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2908 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2909 2910 add(len, len, 32); 2911 } 2912 2913 BIND(L_by16); 2914 subs(len, len, 16); 2915 br(Assembler::GE, L_by16_loop); 2916 adds(len, len, 16-4); 2917 br(Assembler::GE, L_by4_loop); 2918 adds(len, len, 4); 2919 br(Assembler::GT, L_by1_loop); 2920 b(L_exit); 2921 2922 BIND(L_by4_loop); 2923 ldrw(tmp, Address(post(buf, 4))); 2924 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 2925 subs(len, len, 4); 2926 br(Assembler::GE, L_by4_loop); 2927 adds(len, len, 4); 2928 br(Assembler::LE, L_exit); 2929 BIND(L_by1_loop); 2930 subs(len, len, 1); 2931 ldrb(tmp, Address(post(buf, 1))); 2932 update_byte_crc32(crc, tmp, table0); 2933 
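// Note: the subs(len, len, 1) at the top of L_by1_loop set the flags,
// and neither the byte load nor the table lookup in update_byte_crc32
// touches them, so the GT branch below still tests the decremented
// length.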
br(Assembler::GT, L_by1_loop); 2934 b(L_exit); 2935 2936 align(CodeEntryAlignment); 2937 BIND(L_by16_loop); 2938 subs(len, len, 16); 2939 ldp(tmp, tmp3, Address(post(buf, 16))); 2940 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 2941 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 2942 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 2943 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 2944 br(Assembler::GE, L_by16_loop); 2945 adds(len, len, 16-4); 2946 br(Assembler::GE, L_by4_loop); 2947 adds(len, len, 4); 2948 br(Assembler::GT, L_by1_loop); 2949 BIND(L_exit); 2950 ornw(crc, zr, crc); 2951 } 2952 2953 /** 2954 * @param crc register containing existing CRC (32-bit) 2955 * @param buf register pointing to input byte buffer (byte*) 2956 * @param len register containing number of bytes 2957 * @param table register that will contain address of CRC table 2958 * @param tmp scratch register 2959 */ 2960 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 2961 Register table0, Register table1, Register table2, Register table3, 2962 Register tmp, Register tmp2, Register tmp3) { 2963 Label L_exit; 2964 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop; 2965 2966 subs(len, len, 64); 2967 br(Assembler::GE, CRC_by64_loop); 2968 adds(len, len, 64-4); 2969 br(Assembler::GE, CRC_by4_loop); 2970 adds(len, len, 4); 2971 br(Assembler::GT, CRC_by1_loop); 2972 b(L_exit); 2973 2974 BIND(CRC_by4_loop); 2975 ldrw(tmp, Address(post(buf, 4))); 2976 subs(len, len, 4); 2977 crc32cw(crc, crc, tmp); 2978 br(Assembler::GE, CRC_by4_loop); 2979 adds(len, len, 4); 2980 br(Assembler::LE, L_exit); 2981 BIND(CRC_by1_loop); 2982 ldrb(tmp, Address(post(buf, 1))); 2983 subs(len, len, 1); 2984 crc32cb(crc, crc, tmp); 2985 br(Assembler::GT, CRC_by1_loop); 2986 b(L_exit); 2987 2988 align(CodeEntryAlignment); 2989 BIND(CRC_by64_loop); 2990 subs(len, len, 64); 2991 ldp(tmp, tmp3, Address(post(buf, 16))); 2992 crc32cx(crc, crc, tmp); 2993 crc32cx(crc, crc, tmp3); 2994 ldp(tmp, tmp3, Address(post(buf, 16))); 2995 crc32cx(crc, crc, tmp); 2996 crc32cx(crc, crc, tmp3); 2997 ldp(tmp, tmp3, Address(post(buf, 16))); 2998 crc32cx(crc, crc, tmp); 2999 crc32cx(crc, crc, tmp3); 3000 ldp(tmp, tmp3, Address(post(buf, 16))); 3001 crc32cx(crc, crc, tmp); 3002 crc32cx(crc, crc, tmp3); 3003 br(Assembler::GE, CRC_by64_loop); 3004 adds(len, len, 64-4); 3005 br(Assembler::GE, CRC_by4_loop); 3006 adds(len, len, 4); 3007 br(Assembler::GT, CRC_by1_loop); 3008 BIND(L_exit); 3009 return; 3010 } 3011 3012 SkipIfEqual::SkipIfEqual( 3013 MacroAssembler* masm, const bool* flag_addr, bool value) { 3014 _masm = masm; 3015 unsigned long offset; 3016 _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset); 3017 _masm->ldrb(rscratch1, Address(rscratch1, offset)); 3018 _masm->cbzw(rscratch1, _label); 3019 } 3020 3021 SkipIfEqual::~SkipIfEqual() { 3022 _masm->bind(_label); 3023 } 3024 3025 void MacroAssembler::cmpptr(Register src1, Address src2) { 3026 unsigned long offset; 3027 adrp(rscratch1, src2, offset); 3028 ldr(rscratch1, Address(rscratch1, offset)); 3029 cmp(src1, rscratch1); 3030 } 3031 3032 void MacroAssembler::store_check(Register obj, Address dst) { 3033 store_check(obj); 3034 } 3035 3036 void MacroAssembler::store_check(Register obj) { 3037 // Does a store check for the oop in register obj. The content of 3038 // register obj is destroyed afterwards. 
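// As an illustrative sketch (the numbers come from the card table
// configuration, not from this code): with the usual 512-byte cards,
// card_shift is 9, so a store into the object at address A marks the
// card byte at byte_map_base + (A >> 9); since dirty_card_val() is
// asserted to be zero below, storing zr is all that is needed.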
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  lsr(obj, obj, CardTableModRefBS::card_shift);

  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");

  {
    ExternalAddress cardtable((address) ct->byte_map_base);
    unsigned long offset;
    adrp(rscratch1, cardtable, offset);
    assert(offset == 0, "byte_map_base is misaligned");
  }

  if (UseCondCardMark) {
    Label L_already_dirty;
    ldrb(rscratch2, Address(obj, rscratch1));
    cbz(rscratch2, L_already_dirty);
    strb(zr, Address(obj, rscratch1));
    bind(L_already_dirty);
  } else {
    strb(zr, Address(obj, rscratch1));
  }
}

void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else {
    ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
  if (UseCompressedClassPointers) {
    ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
    if (Universe::narrow_klass_base() == NULL) {
      cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
      return;
    } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
               && Universe::narrow_klass_shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(trial_klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
  }
  cmp(trial_klass, tmp);
}

void MacroAssembler::load_prototype_header(Register dst, Register src) {
  load_klass(dst, src);
  ldr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release?  concurrent GCs assume
  // klass length is valid if klass field is not null.
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register d, Register s) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(s, "broken oop in encode_heap_oop");
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      lsr(d, s, LogMinObjAlignmentInBytes);
    } else {
      mov(d, s);
    }
  } else {
    subs(d, s, rheapbase);
    csel(d, d, zr, Assembler::HS);
    lsr(d, d, LogMinObjAlignmentInBytes);

    /* Old algorithm: is this any worse?
3138 Label nonnull; 3139 cbnz(r, nonnull); 3140 sub(r, r, rheapbase); 3141 bind(nonnull); 3142 lsr(r, r, LogMinObjAlignmentInBytes); 3143 */ 3144 } 3145 } 3146 3147 void MacroAssembler::encode_heap_oop_not_null(Register r) { 3148 #ifdef ASSERT 3149 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 3150 if (CheckCompressedOops) { 3151 Label ok; 3152 cbnz(r, ok); 3153 stop("null oop passed to encode_heap_oop_not_null"); 3154 bind(ok); 3155 } 3156 #endif 3157 verify_oop(r, "broken oop in encode_heap_oop_not_null"); 3158 if (Universe::narrow_oop_base() != NULL) { 3159 sub(r, r, rheapbase); 3160 } 3161 if (Universe::narrow_oop_shift() != 0) { 3162 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3163 lsr(r, r, LogMinObjAlignmentInBytes); 3164 } 3165 } 3166 3167 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 3168 #ifdef ASSERT 3169 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 3170 if (CheckCompressedOops) { 3171 Label ok; 3172 cbnz(src, ok); 3173 stop("null oop passed to encode_heap_oop_not_null2"); 3174 bind(ok); 3175 } 3176 #endif 3177 verify_oop(src, "broken oop in encode_heap_oop_not_null2"); 3178 3179 Register data = src; 3180 if (Universe::narrow_oop_base() != NULL) { 3181 sub(dst, src, rheapbase); 3182 data = dst; 3183 } 3184 if (Universe::narrow_oop_shift() != 0) { 3185 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3186 lsr(dst, data, LogMinObjAlignmentInBytes); 3187 data = dst; 3188 } 3189 if (data == src) 3190 mov(dst, src); 3191 } 3192 3193 void MacroAssembler::decode_heap_oop(Register d, Register s) { 3194 #ifdef ASSERT 3195 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 3196 #endif 3197 if (Universe::narrow_oop_base() == NULL) { 3198 if (Universe::narrow_oop_shift() != 0 || d != s) { 3199 lsl(d, s, Universe::narrow_oop_shift()); 3200 } 3201 } else { 3202 Label done; 3203 if (d != s) 3204 mov(d, s); 3205 cbz(s, done); 3206 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 3207 bind(done); 3208 } 3209 verify_oop(d, "broken oop in decode_heap_oop"); 3210 } 3211 3212 void MacroAssembler::decode_heap_oop_not_null(Register r) { 3213 assert (UseCompressedOops, "should only be used for compressed headers"); 3214 assert (Universe::heap() != NULL, "java heap should be initialized"); 3215 // Cannot assert, unverified entry point counts instructions (see .ad file) 3216 // vtableStubs also counts instructions in pd_code_size_limit. 3217 // Also do not verify_oop as this is called by verify_oop. 3218 if (Universe::narrow_oop_shift() != 0) { 3219 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3220 if (Universe::narrow_oop_base() != NULL) { 3221 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3222 } else { 3223 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 3224 } 3225 } else { 3226 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3227 } 3228 } 3229 3230 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 3231 assert (UseCompressedOops, "should only be used for compressed headers"); 3232 assert (Universe::heap() != NULL, "java heap should be initialized"); 3233 // Cannot assert, unverified entry point counts instructions (see .ad file) 3234 // vtableStubs also counts instructions in pd_code_size_limit. 3235 // Also do not verify_oop as this is called by verify_oop. 
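// In outline, the decode performed below is (a sketch of the cases
// handled, not extra functionality):
//   oop = narrow_oop << shift                 when the base is NULL
//   oop = heap_base + (narrow_oop << shift)   otherwise
// and because the operand is known to be non-null, no null check is
// needed before adding in the base.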
3236 if (Universe::narrow_oop_shift() != 0) { 3237 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3238 if (Universe::narrow_oop_base() != NULL) { 3239 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3240 } else { 3241 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 3242 } 3243 } else { 3244 assert (Universe::narrow_oop_base() == NULL, "sanity"); 3245 if (dst != src) { 3246 mov(dst, src); 3247 } 3248 } 3249 } 3250 3251 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3252 if (Universe::narrow_klass_base() == NULL) { 3253 if (Universe::narrow_klass_shift() != 0) { 3254 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3255 lsr(dst, src, LogKlassAlignmentInBytes); 3256 } else { 3257 if (dst != src) mov(dst, src); 3258 } 3259 return; 3260 } 3261 3262 if (use_XOR_for_compressed_class_base) { 3263 if (Universe::narrow_klass_shift() != 0) { 3264 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3265 lsr(dst, dst, LogKlassAlignmentInBytes); 3266 } else { 3267 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3268 } 3269 return; 3270 } 3271 3272 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3273 && Universe::narrow_klass_shift() == 0) { 3274 movw(dst, src); 3275 return; 3276 } 3277 3278 #ifdef ASSERT 3279 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 3280 #endif 3281 3282 Register rbase = dst; 3283 if (dst == src) rbase = rheapbase; 3284 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3285 sub(dst, src, rbase); 3286 if (Universe::narrow_klass_shift() != 0) { 3287 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3288 lsr(dst, dst, LogKlassAlignmentInBytes); 3289 } 3290 if (dst == src) reinit_heapbase(); 3291 } 3292 3293 void MacroAssembler::encode_klass_not_null(Register r) { 3294 encode_klass_not_null(r, r); 3295 } 3296 3297 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3298 Register rbase = dst; 3299 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3300 3301 if (Universe::narrow_klass_base() == NULL) { 3302 if (Universe::narrow_klass_shift() != 0) { 3303 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3304 lsl(dst, src, LogKlassAlignmentInBytes); 3305 } else { 3306 if (dst != src) mov(dst, src); 3307 } 3308 return; 3309 } 3310 3311 if (use_XOR_for_compressed_class_base) { 3312 if (Universe::narrow_klass_shift() != 0) { 3313 lsl(dst, src, LogKlassAlignmentInBytes); 3314 eor(dst, dst, (uint64_t)Universe::narrow_klass_base()); 3315 } else { 3316 eor(dst, src, (uint64_t)Universe::narrow_klass_base()); 3317 } 3318 return; 3319 } 3320 3321 if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0 3322 && Universe::narrow_klass_shift() == 0) { 3323 if (dst != src) 3324 movw(dst, src); 3325 movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32); 3326 return; 3327 } 3328 3329 // Cannot assert, unverified entry point counts instructions (see .ad file) 3330 // vtableStubs also counts instructions in pd_code_size_limit. 3331 // Also do not verify_oop as this is called by verify_oop. 
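// General case below: dst = narrow_klass_base + (src << shift).
// No scratch register is available at this point, so if dst aliases
// src the base constant is materialised in rheapbase, which is then
// restored with reinit_heapbase() at the end.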
3332 if (dst == src) rbase = rheapbase; 3333 mov(rbase, (uint64_t)Universe::narrow_klass_base()); 3334 if (Universe::narrow_klass_shift() != 0) { 3335 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3336 add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); 3337 } else { 3338 add(dst, rbase, src); 3339 } 3340 if (dst == src) reinit_heapbase(); 3341 } 3342 3343 void MacroAssembler::decode_klass_not_null(Register r) { 3344 decode_klass_not_null(r, r); 3345 } 3346 3347 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 3348 assert (UseCompressedOops, "should only be used for compressed oops"); 3349 assert (Universe::heap() != NULL, "java heap should be initialized"); 3350 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3351 3352 int oop_index = oop_recorder()->find_index(obj); 3353 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop"); 3354 3355 InstructionMark im(this); 3356 RelocationHolder rspec = oop_Relocation::spec(oop_index); 3357 code_section()->relocate(inst_mark(), rspec); 3358 movz(dst, 0xDEAD, 16); 3359 movk(dst, 0xBEEF); 3360 } 3361 3362 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 3363 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 3364 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 3365 int index = oop_recorder()->find_index(k); 3366 assert(! Universe::heap()->is_in_reserved(k), "should not be an oop"); 3367 3368 InstructionMark im(this); 3369 RelocationHolder rspec = metadata_Relocation::spec(index); 3370 code_section()->relocate(inst_mark(), rspec); 3371 narrowKlass nk = Klass::encode_klass(k); 3372 movz(dst, (nk >> 16), 16); 3373 movk(dst, nk & 0xffff); 3374 } 3375 3376 void MacroAssembler::load_heap_oop(Register dst, Address src) 3377 { 3378 if (UseCompressedOops) { 3379 ldrw(dst, src); 3380 decode_heap_oop(dst); 3381 } else { 3382 ldr(dst, src); 3383 } 3384 } 3385 3386 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) 3387 { 3388 if (UseCompressedOops) { 3389 ldrw(dst, src); 3390 decode_heap_oop_not_null(dst); 3391 } else { 3392 ldr(dst, src); 3393 } 3394 } 3395 3396 void MacroAssembler::store_heap_oop(Address dst, Register src) { 3397 if (UseCompressedOops) { 3398 assert(!dst.uses(src), "not enough registers"); 3399 encode_heap_oop(src); 3400 strw(src, dst); 3401 } else 3402 str(src, dst); 3403 } 3404 3405 // Used for storing NULLs. 3406 void MacroAssembler::store_heap_oop_null(Address dst) { 3407 if (UseCompressedOops) { 3408 strw(zr, dst); 3409 } else 3410 str(zr, dst); 3411 } 3412 3413 #if INCLUDE_ALL_GCS 3414 void MacroAssembler::g1_write_barrier_pre(Register obj, 3415 Register pre_val, 3416 Register thread, 3417 Register tmp, 3418 bool tosca_live, 3419 bool expand_call) { 3420 // If expand_call is true then we expand the call_VM_leaf macro 3421 // directly to skip generating the check by 3422 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 
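// In pseudocode, the SATB pre-barrier emitted below is roughly
// (names here follow the PtrQueue offsets used in the code, not a
// literal API):
//
//   if (marking_is_active(thread)) {
//     if (obj != noreg) pre_val = *obj;       // load previous value
//     if (pre_val != NULL) {
//       if (satb_queue.index != 0) {          // room in the buffer?
//         satb_queue.index -= wordSize;
//         satb_queue.buf[satb_queue.index] = pre_val;
//       } else {
//         runtime_call(g1_wb_pre, pre_val, thread);
//       }
//     }
//   }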
  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg)
    assert_different_registers(obj, pre_val, tmp);

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    ldrb(tmp, in_progress);
  }
  cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  cbz(pre_val, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ldr(tmp, index);                      // tmp := *index_adr
  cbz(tmp, runtime);                    // tmp == 0?
                                        // If yes, goto runtime

  sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  str(tmp, index);                      // *index_adr := tmp
  ldr(rscratch1, buffer);
  add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  str(pre_val, Address(tmp, 0));
  b(done);

  bind(runtime);
  // save the live input values
  push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
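  // Two paths, both ending in SharedRuntime::g1_wb_pre (a brief recap of the
  // choice made below): with expand_call we pass the arguments and call
  // call_VM_leaf_base directly, skipping the _last_sp check; otherwise we go
  // through the ordinary call_VM_leaf wrapper.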
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    pass_arg1(this, thread);
    pass_arg0(this, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);

  bind(done);
}

void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register thread,
                                           Register tmp,
                                           Register tmp2) {
  assert(thread == rthread, "must be");

  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  Label done;
  Label runtime;

  // Does store cross heap regions?

  eor(tmp, store_addr, new_val);
  lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
  cbz(tmp, done);

  // crosses regions, storing NULL?

  cbz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  const Register card_addr = tmp;

  lsr(card_addr, store_addr, CardTableModRefBS::card_shift);

  unsigned long offset;
  adrp(tmp2, cardtable, offset);

  // get the address of the card
  add(card_addr, card_addr, tmp2);
  ldrb(tmp2, Address(card_addr, offset));
  cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  br(Assembler::EQ, done);

  assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

  membar(Assembler::StoreLoad);

  ldrb(tmp2, Address(card_addr, offset));
  cbzw(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  strb(zr, Address(card_addr, offset));

  ldr(rscratch1, queue_index);
  cbz(rscratch1, runtime);
  sub(rscratch1, rscratch1, wordSize);
  str(rscratch1, queue_index);

  ldr(tmp2, buffer);
  str(card_addr, Address(tmp2, rscratch1));
  b(done);

  bind(runtime);
  // save the live input values
  push(store_addr->bit(true) | new_val->bit(true), sp);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(store_addr->bit(true) | new_val->bit(true), sp);

  bind(done);
}

#endif // INCLUDE_ALL_GCS

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return Address((address)obj, rspec);
}

// Move an oop into a register.  immediate is true if we want
// immediate instructions, i.e. we are not going to patch this
// instruction while the code is being executed by another thread.  In
// that case we can use move immediates rather than the constant pool.
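// As an illustration (a sketch of the two code shapes, not literal output):
// with immediate == true we emit a patchable move-immediate sequence, roughly
//
//   movz dst, #imm16_0 ; movk dst, #imm16_1, lsl 16 ; ...
//
// while with immediate == false the oop is loaded from a constant-pool slot
// with a pc-relative ldr, which can be repatched while other threads run.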
void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  if (! immediate) {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr_constant(dst, Address(dummy, rspec));
  } else
    mov(dst, Address((address)obj, rspec));
}

// Move a metadata address into a register.
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
  }
  RelocationHolder rspec = metadata_Relocation::spec(oop_index);
  mov(dst, Address((address)obj, rspec));
}

Address MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return Address((address)obj, oop_Relocation::spec(oop_index));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes));
  }
  ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  cmp(end, rscratch1);
  br(Assembler::HI, slow_case);

  // update the tlab top pointer
  str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Preserves r19 and r3.
Register MacroAssembler::tlab_refill(Label& retry,
                                     Label& try_eden,
                                     Label& slow_case) {
  Register top = r0;
  Register t1  = r2;
  Register t2  = r4;
  assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    b(slow_case);
  }

  ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  ldr(t1,  Address(rthread, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  sub(t1, t1, top);
  lsr(t1, t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
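  // The policy below as a C-like sketch (illustrative pseudo-code only):
  //
  //   if (free > tlab_refill_waste_limit) {
  //     // Keep the current tlab: raise the limit and allocate this
  //     // object directly in eden.
  //     tlab_refill_waste_limit += refill_waste_limit_increment;
  //     goto try_eden;
  //   } else {
  //     // Discard: fill the dead tlab with a dummy int[] and refill it.
  //   }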
  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  cmp(t1, rscratch1);
  br(Assembler::LE, discard_tlab);

  // Retain
  // ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  add(rscratch1, rscratch1, t2);
  str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));

  if (TLABStats) {
    // increment number of slow_allocations
    addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
          1, rscratch1);
  }
  b(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
          rscratch1);
    // accumulate wastage -- t1 is amount free in tlab
    addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
          rscratch1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  cbz(top, do_refill);

  // set up the mark word
  mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
  // set the length to the remaining space
  sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
  add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
  strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
  // set klass to intArrayKlass
  {
    unsigned long offset;
    // dubious reloc: why not an oop reloc?
    adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
         offset);
    ldr(t1, Address(rscratch1, offset));
  }
  // store klass last.  concurrent GCs assume the length is valid if the
  // klass field is not null.
  store_klass(top, t1);

  mov(t1, top);
  ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  sub(t1, t1, rscratch1);
  incr_allocated_bytes(rthread, t1, 0, rscratch1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
  lsl(t1, t1, LogHeapWordSize);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
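  // eden_allocate must leave t1 (the tlab size in bytes computed above)
  // unchanged; the debug-only check below recomputes the size and stops on a
  // mismatch.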
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = r4;
    assert_different_registers(tsize, rthread, t1);
    str(tsize, Address(pre(sp, -16)));
    ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
    lsl(tsize, tsize, LogHeapWordSize);
    cmp(t1, tsize);
    br(Assembler::EQ, ok);
    STOP("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    ldr(tsize, Address(post(sp, 16)));
  }
#endif
  str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
  str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
  add(top, top, t1);
  sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
  verify_tlab();
  b(retry);

  return rthread; // for use by caller
}

// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    bind(retry);
    {
      unsigned long offset;
      adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      add(rscratch1, rscratch1, offset);
      ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    cmp(end, obj);
    br(Assembler::LO, slow_case);

    cmp(end, heap_end);
    br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
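    // ldaxr/stlxr form a load-linked/store-conditional pair: the store below
    // succeeds only if no other CPU has written *heap_top since our ldaxr
    // above.  On failure stlxr sets rscratch2 non-zero and we branch back to
    // retry with a fresh top value.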
    stlxr(rscratch2, end, rscratch1);
    cbnzw(rscratch2, retry);
  }
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;

    stp(rscratch2, rscratch1, Address(pre(sp, -16)));

    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
    ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  mov(rscratch1, os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < StackShadowPages-1; i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    lea(tmp, Address(tmp, -os::vm_page_size()));
    str(size, Address(tmp));
  }
}


address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
  unsigned long off;
  adrp(r, Address(page, rtype), off);
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, off));
  return inst_mark();
}

address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
  InstructionMark im(this);
  code_section()->relocate(inst_mark(), rtype);
  ldrw(zr, Address(r, 0));
  return inst_mark();
}

void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
  relocInfo::relocType rtype = dest.rspec().reloc()->type();
  if (uabs(pc() - dest.target()) >= (1LL << 32)) {
    guarantee(rtype == relocInfo::none
              || rtype == relocInfo::external_word_type
              || rtype == relocInfo::poll_type
              || rtype == relocInfo::poll_return_type,
              "can only use a fixed address with an ADRP");
    // Out of range.  This doesn't happen very often, but we have to
    // handle it.
    mov(reg1, dest);
    byte_offset = 0;
  } else {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), dest.rspec());
    byte_offset = (uint64_t)dest.target() & 0xfff;
    _adrp(reg1, dest.target());
  }
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize > 0, "framesize must be > 0");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
}


// Search for str1 in str2 and return index or -1
void MacroAssembler::string_indexof(Register str2, Register str1,
                                    Register cnt2, Register cnt1,
                                    Register tmp1, Register tmp2,
                                    Register tmp3, Register tmp4,
                                    int icnt1, Register result) {
  Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;

  Register ch1 = rscratch1;
  Register ch2 = rscratch2;
  Register cnt1tmp = tmp1;
  Register cnt2tmp = tmp2;
  Register cnt1_neg = cnt1;
  Register cnt2_neg = cnt2;
  Register result_tmp = tmp4;

  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;

  // We have two strings, a source string in str2, cnt2 and a pattern string
  // in str1, cnt1.  Find the first occurrence of the pattern in the source
  // or return -1.

  // For larger pattern and source we use a simplified Boyer-Moore algorithm.
  // With a small pattern and source we use a linear scan.

  if (icnt1 == -1) {
    cmp(cnt1, 256);             // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
    ccmp(cnt1, 8, 0b0000, LO);  // Can't handle skip >= 256 because we use
    br(LO, LINEARSEARCH);       // a byte array.
    cmp(cnt1, cnt2, LSR, 2);    // Source must be 4 * pattern for BM
    br(HS, LINEARSEARCH);
  }

  // The Boyer-Moore algorithm is based on the description here:-
  //
  // http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
  //
  // This describes an algorithm with 2 shift rules: the 'Bad Character' rule
  // and the 'Good Suffix' rule.
  //
  // These rules are essentially heuristics for how far we can shift the
  // pattern along the search string.
  //
  // The implementation here uses the 'Bad Character' rule only because of the
  // complexity of initialisation for the 'Good Suffix' rule.
  //
  // This is also known as the Boyer-Moore-Horspool algorithm:-
  //
  // http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
  //
  // #define ASIZE 128
  //
  // int bm(unsigned char *x, int m, unsigned char *y, int n) {
  //   int i, j;
  //   unsigned c;
  //   unsigned char bc[ASIZE];
  //
  //   /* Preprocessing */
  //   for (i = 0; i < ASIZE; ++i)
  //     bc[i] = 0;
  //   for (i = 0; i < m - 1; ) {
  //     c = x[i];
  //     ++i;
  //     if (c < ASIZE) bc[c] = i;
  //   }
  //
  //   /* Searching */
  //   j = 0;
  //   while (j <= n - m) {
  //     c = y[j + m - 1];
  //     if (x[m-1] == c) {
  //       for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i);
  //       if (i < 0) return j;
  //     }
  //     if (c < ASIZE)
  //       j = j - bc[c] + m; // bc[c] is the 1-based last position of c in x[0..m-2]
  //     else
  //       j += 1;            // Advance by 1 only if char >= ASIZE
  //   }
  //   return -1;
  // }

  if (icnt1 == -1) {
    BIND(BM);

    Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP;
    Label BMADV, BMMATCH, BMCHECKEND;

    Register cnt1end = tmp2;
    Register str2end = cnt2;
    Register skipch = tmp2;

    // Restrict ASIZE to 128 to reduce stack space/initialisation.
    // The presence of chars >= ASIZE in the target string does not affect
    // performance, but we must be careful not to initialise them in the stack
    // array.
    // The presence of chars >= ASIZE in the source string may adversely affect
    // performance since we can only advance by one when we encounter one.

    stp(zr, zr, pre(sp, -128));
    for (int i = 1; i < 8; i++)
      stp(zr, zr, Address(sp, i*16));

    mov(cnt1tmp, 0);
    sub(cnt1end, cnt1, 1);
    BIND(BCLOOP);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    cmp(ch1, 128);
    add(cnt1tmp, cnt1tmp, 1);
    br(HS, BCSKIP);
    strb(cnt1tmp, Address(sp, ch1));
    BIND(BCSKIP);
    cmp(cnt1tmp, cnt1end);
    br(LT, BCLOOP);

    mov(result_tmp, str2);

    sub(cnt2, cnt2, cnt1);
    add(str2end, str2, cnt2, LSL, 1);
    BIND(BMLOOPSTR2);
    sub(cnt1tmp, cnt1, 1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, skipch);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(LT, BMMATCH);
    BIND(BMLOOPSTR1);
    ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
    ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1)));
    cmp(ch1, ch2);
    br(NE, BMSKIP);
    subs(cnt1tmp, cnt1tmp, 1);
    br(GE, BMLOOPSTR1);
    BIND(BMMATCH);
    sub(result_tmp, str2, result_tmp);
    lsr(result, result_tmp, 1);
    add(sp, sp, 128);
    b(DONE);
    BIND(BMADV);
    add(str2, str2, 2);
    b(BMCHECKEND);
    BIND(BMSKIP);
    cmp(skipch, 128);
    br(HS, BMADV);
    ldrb(ch2, Address(sp, skipch));
    add(str2, str2, cnt1, LSL, 1);
    sub(str2, str2, ch2, LSL, 1);
    BIND(BMCHECKEND);
    cmp(str2, str2end);
    br(LE, BMLOOPSTR2);
    add(sp, sp, 128);
    b(NOMATCH);
  }

  BIND(LINEARSEARCH);
  {
    Label DO1, DO2, DO3;

    Register str2tmp = tmp2;
    Register first = tmp3;

    if (icnt1 == -1)
    {
      Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD;

      cmp(cnt1, 4);
      br(LT, DOSHORT);

      sub(cnt2, cnt2, cnt1);
      sub(cnt1, cnt1, 4);
      mov(result_tmp, cnt2);

      lea(str1, Address(str1, cnt1, Address::uxtw(1)));
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt1_neg, zr, cnt1, LSL, 1);
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      ldr(first, Address(str1, cnt1_neg));

      BIND(FIRST_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      adds(cnt1tmp, cnt1_neg, 8);
      add(cnt2tmp, cnt2_neg, 8);
      br(GE, LAST_WORD);

      BIND(STR1_NEXT);
      ldr(ch1, Address(str1, cnt1tmp));
      ldr(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      adds(cnt1tmp, cnt1tmp, 8);
      add(cnt2tmp, cnt2tmp, 8);
      br(LT, STR1_NEXT);

      BIND(LAST_WORD);
      ldr(ch1, Address(str1));
      sub(str2tmp, str2, cnt1_neg);         // adjust to corresponding
      ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);

      BIND(DOSHORT);
      cmp(cnt1, 2);
      br(LT, DO1);
      br(GT, DO3);
    }

    if (icnt1 == 4) {
      Label CH1_LOOP;

      ldr(ch1, str1);
      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 2) {
      Label CH1_LOOP;

      BIND(DO2);
      ldrw(ch1, str1);
      sub(cnt2, cnt2, 2);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(CH1_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmp(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, CH1_LOOP);
      b(NOMATCH);
    }

    if (icnt1 == -1 || icnt1 == 3) {
      Label FIRST_LOOP, STR2_NEXT, STR1_LOOP;

      BIND(DO3);
      ldrw(first, str1);
      ldrh(ch1, Address(str1, 4));

      sub(cnt2, cnt2, 3);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      BIND(FIRST_LOOP);
      ldrw(ch2, Address(str2, cnt2_neg));
      cmpw(first, ch2);
      br(EQ, STR1_LOOP);
      BIND(STR2_NEXT);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LE, FIRST_LOOP);
      b(NOMATCH);

      BIND(STR1_LOOP);
      add(cnt2tmp, cnt2_neg, 4);
      ldrh(ch2, Address(str2, cnt2tmp));
      cmp(ch1, ch2);
      br(NE, STR2_NEXT);
      b(MATCH);
    }

    if (icnt1 == -1 || icnt1 == 1) {
      Label CH1_LOOP, HAS_ZERO;
      Label DO1_SHORT, DO1_LOOP;

      BIND(DO1);
      ldrh(ch1, str1);
      cmp(cnt2, 4);
      br(LT, DO1_SHORT);

      orr(ch1, ch1, ch1, LSL, 16);
      orr(ch1, ch1, ch1, LSL, 32);

      sub(cnt2, cnt2, 4);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);

      mov(tmp3, 0x0001000100010001);
      BIND(CH1_LOOP);
      ldr(ch2, Address(str2, cnt2_neg));
      eor(ch2, ch1, ch2);
      sub(tmp1, ch2, tmp3);
      orr(tmp2, ch2, 0x7fff7fff7fff7fff);
      bics(tmp1, tmp1, tmp2);
      br(NE, HAS_ZERO);
      adds(cnt2_neg, cnt2_neg, 8);
      br(LT, CH1_LOOP);

      cmp(cnt2_neg, 8);
      mov(cnt2_neg, 0);
      br(LT, CH1_LOOP);
      b(NOMATCH);

      BIND(HAS_ZERO);
      rev(tmp1, tmp1);
      clz(tmp1, tmp1);
      add(cnt2_neg, cnt2_neg, tmp1, LSR, 3);
      b(MATCH);

      BIND(DO1_SHORT);
      mov(result_tmp, cnt2);
      lea(str2, Address(str2, cnt2, Address::uxtw(1)));
      sub(cnt2_neg, zr, cnt2, LSL, 1);
      BIND(DO1_LOOP);
      ldrh(ch2, Address(str2, cnt2_neg));
      cmpw(ch1, ch2);
      br(EQ, MATCH);
      adds(cnt2_neg, cnt2_neg, 2);
      br(LT, DO1_LOOP);
    }
  }
  BIND(NOMATCH);
  mov(result, -1);
  b(DONE);
  BIND(MATCH);
  add(result, result_tmp, cnt2_neg, ASR, 1);
  BIND(DONE);
}

// Compare strings.
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    Register tmp1) {
  Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD, DIFFERENCE;

  BLOCK_COMMENT("string_compare {");

  // Compute the minimum of the string lengths and save the difference.
  subsw(tmp1, cnt1, cnt2);
  cselw(cnt2, cnt1, cnt2, Assembler::LE); // min

  // A very short string
  cmpw(cnt2, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, LENGTH_DIFF);

  // Compare longwords
  {
    subw(cnt2, cnt2, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt2, Address::uxtw(1)));
    lea(str2, Address(str2, cnt2, Address::uxtw(1)));
    sub(cnt2, zr, cnt2, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(result, Address(str1, cnt2));
    ldr(cnt1, Address(str2, cnt2));
    adds(cnt2, cnt2, wordSize);
    eor(rscratch2, result, cnt1);
    cbnz(rscratch2, DIFFERENCE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword.  In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(result, Address(str1));
    ldr(cnt1, Address(str2));
    eor(rscratch2, result, cnt1);
    cbz(rscratch2, LENGTH_DIFF);

    // Find the first different characters in the longwords and
    // compute their difference.
    bind(DIFFERENCE);
    rev(rscratch2, rscratch2);
    clz(rscratch2, rscratch2);
    andr(rscratch2, rscratch2, -16);
    lsrv(result, result, rscratch2);
    uxthw(result, result);
    lsrv(cnt1, cnt1, rscratch2);
    uxthw(cnt1, cnt1);
    subw(result, result, cnt1);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the minimum length zero?
  cbz(cnt2, LENGTH_DIFF);

  bind(SHORT_LOOP);
  load_unsigned_short(result, Address(post(str1, 2)));
  load_unsigned_short(cnt1, Address(post(str2, 2)));
  subw(result, result, cnt1);
  cbnz(result, DONE);
  sub(cnt2, cnt2, 1);
  cbnz(cnt2, SHORT_LOOP);

  // Strings are equal up to min length.  Return the length difference.
  bind(LENGTH_DIFF);
  mov(result, tmp1);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_compare");
}


void MacroAssembler::string_equals(Register str1, Register str2,
                                   Register cnt, Register result,
                                   Register tmp1) {
  Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD;

  const Register tmp2 = rscratch1;
  assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);

  BLOCK_COMMENT("string_equals {");

  // Start by assuming that the strings are not equal.
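  // The comparison strategy, in outline (illustrative only): strings shorter
  // than 4 chars are compared one char at a time; otherwise we compare one
  // 64-bit word (4 chars) per iteration, walking both strings from a negated
  // byte offset, and finish with a final, possibly overlapping, load of the
  // last word.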
  mov(result, zr);

  // A very short string
  cmpw(cnt, 4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
  cmp(str1, str2);
  br(Assembler::EQ, SAME_CHARS);

  // Compare longwords
  {
    subw(cnt, cnt, 4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt, Address::uxtw(1)));
    lea(str2, Address(str2, cnt, Address::uxtw(1)));
    sub(cnt, zr, cnt, LSL, 1);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(tmp1, Address(str1, cnt));
    ldr(tmp2, Address(str2, cnt));
    adds(cnt, cnt, wordSize);
    eor(rscratch2, tmp1, tmp2);
    cbnz(rscratch2, DONE);
    br(Assembler::LT, NEXT_WORD);

    // Last longword.  In the case where length == 4 we compare the
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(tmp1, Address(str1));
    ldr(tmp2, Address(str2));
    eor(rscratch2, tmp1, tmp2);
    cbz(rscratch2, SAME_CHARS);
    b(DONE);
  }

  bind(SHORT_STRING);
  // Is the length zero?
  cbz(cnt, SAME_CHARS);

  bind(SHORT_LOOP);
  load_unsigned_short(tmp1, Address(post(str1, 2)));
  load_unsigned_short(tmp2, Address(post(str2, 2)));
  subw(tmp1, tmp1, tmp2);
  cbnz(tmp1, DONE);
  sub(cnt, cnt, 1);
  cbnz(cnt, SHORT_LOOP);

  // Strings are equal.
  bind(SAME_CHARS);
  mov(result, true);

  // That's it
  bind(DONE);

  BLOCK_COMMENT("} string_equals");
}

// Compare char[] arrays aligned to 4 bytes
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                        Register result, Register tmp1)
{
  Register cnt1 = rscratch1;
  Register cnt2 = rscratch2;
  Register tmp2 = rscratch2;

  Label SAME, DIFFER, NEXT, TAIL03, TAIL01;

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  BLOCK_COMMENT("char_arrays_equals {");

  // different until proven equal
  mov(result, false);

  // same array?
  cmp(ary1, ary2);
  br(Assembler::EQ, SAME);

  // ne if either null
  cbz(ary1, DIFFER);
  cbz(ary2, DIFFER);

  // lengths ne?
  ldrw(cnt1, Address(ary1, length_offset));
  ldrw(cnt2, Address(ary2, length_offset));
  cmp(cnt1, cnt2);
  br(Assembler::NE, DIFFER);

  lea(ary1, Address(ary1, base_offset));
  lea(ary2, Address(ary2, base_offset));

  subs(cnt1, cnt1, 4);
  br(LT, TAIL03);

  BIND(NEXT);
  ldr(tmp1, Address(post(ary1, 8)));
  ldr(tmp2, Address(post(ary2, 8)));
  subs(cnt1, cnt1, 4);
  eor(tmp1, tmp1, tmp2);
  cbnz(tmp1, DIFFER);
  br(GE, NEXT);

  BIND(TAIL03);  // 0-3 chars left, cnt1 = #chars left - 4
  tst(cnt1, 0b10);
  br(EQ, TAIL01);
  ldrw(tmp1, Address(post(ary1, 4)));
  ldrw(tmp2, Address(post(ary2, 4)));
  cmp(tmp1, tmp2);
  br(NE, DIFFER);
  BIND(TAIL01);  // 0-1 chars left
  tst(cnt1, 0b01);
  br(EQ, SAME);
  ldrh(tmp1, ary1);
  ldrh(tmp2, ary2);
  cmp(tmp1, tmp2);
  br(NE, DIFFER);

  BIND(SAME);
  mov(result, true);
  BIND(DIFFER); // result already set

  BLOCK_COMMENT("} char_arrays_equals");
}

// encode char[] to byte[] in ISO_8859_1
void MacroAssembler::encode_iso_array(Register src, Register dst,
                                      Register len, Register result,
                                      FloatRegister Vtmp1, FloatRegister Vtmp2,
                                      FloatRegister Vtmp3, FloatRegister Vtmp4)
{
  Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1;
  Register tmp1 = rscratch1;

  mov(result, len); // Save initial len

#ifndef BUILTIN_SIM
  subs(len, len, 32);
  br(LT, LOOP_8);

  // The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions
  // to convert chars to bytes.  These set the 'QC' bit in the FPSR if
  // any char could not fit in a byte, so clear the FPSR so we can test it.
  clear_fpsr();

  BIND(NEXT_32);
  ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);  // uqxtn  - write bottom half
  uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half
  uqxtn(Vtmp2, T8B, Vtmp3, T8H);
  uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_8);
  st1(Vtmp1, Vtmp2, T16B, post(dst, 32));
  subs(len, len, 32);
  add(src, src, 64);
  br(GE, NEXT_32);

  BIND(LOOP_8);
  adds(len, len, 32-8);
  br(LT, LOOP_1);
  clear_fpsr(); // QC may be set from loop above, clear again
  BIND(NEXT_8);
  ld1(Vtmp1, T8H, src);
  uqxtn(Vtmp1, T8B, Vtmp1, T8H);
  get_fpsr(tmp1);
  cbnzw(tmp1, LOOP_1);
  st1(Vtmp1, T8B, post(dst, 8));
  subs(len, len, 8);
  add(src, src, 16);
  br(GE, NEXT_8);

  BIND(LOOP_1);
  adds(len, len, 8);
  br(LE, DONE);
#else
  cbz(len, DONE);
#endif
  BIND(NEXT_1);
  ldrh(tmp1, Address(post(src, 2)));
  tst(tmp1, 0xff00);
  br(NE, DONE);
  strb(tmp1, Address(post(dst, 1)));
  subs(len, len, 1);
  br(GT, NEXT_1);

  BIND(DONE);
  sub(result, result, len); // Return index where we stopped
}
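// For reference, a scalar C sketch of the contract implemented above (the
// SIMD paths merely perform the same test 8 or 32 chars at a time; the
// helper below is illustrative, not part of HotSpot):
//
//   int encode_iso(jchar* src, jbyte* dst, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       jchar c = src[i];
//       if (c & 0xff00) break;  // char does not fit in ISO-8859-1
//       dst[i] = (jbyte)c;
//     }
//     return i;                 // index where we stopped
//   }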