/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
    if (!checkcast) {
      if (!obj_int) {
        // Save count for barrier
        __ movptr(r11, count);
      } else if (disjoint) {
        // Save dst in r11 in the disjoint case
        __ movq(r11, dst);
      }
    }
#else
    if (disjoint) {
      __ mov(rdx, dst); // save 'to'
    }
#endif

    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
      Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
      __ push(thread);
      __ get_thread(thread);
#endif

      Label done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, done);

      // Avoid runtime call when not marking.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ testb(gc_state, ShenandoahHeap::MARKING);
      __ jcc(Assembler::zero, done);

      __ pusha(); // push registers
#ifdef _LP64
      if (count == c_rarg0) {
        if (dst == c_rarg1) {
          // exactly backwards!!
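          // count already sits in c_rarg0 and dst in c_rarg1, while the call
          // below wants them the other way around; a single xchg swaps them
          // into place without needing a scratch register.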
          __ xchgptr(c_rarg1, c_rarg0);
        } else {
          __ movptr(c_rarg1, count);
          __ movptr(c_rarg0, dst);
        }
      } else {
        __ movptr(c_rarg0, dst);
        __ movptr(c_rarg1, count);
      }
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
#else
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry),
                      dst, count);
#endif
      __ popa();
      __ bind(done);
      NOT_LP64(__ pop(thread);)
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {
  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
  Register tmp = rax;

  if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved by the prologue for the barrier
        count = r11;
      } else if (disjoint && obj_int) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
#else
    if (disjoint) {
      __ mov(dst, rdx); // restore 'to'
    }
#endif

    Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
    __ push(thread);
    __ get_thread(thread);
#endif

    // Short-circuit if count == 0.
    Label done;
    __ testptr(count, count);
    __ jcc(Assembler::zero, done);

    // Skip runtime call if no forwarded objects.
    Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ testb(gc_state, ShenandoahHeap::UPDATEREFS);
    __ jcc(Assembler::zero, done);

    __ pusha(); // push registers (overkill)
#ifdef _LP64
    if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
      assert_different_registers(c_rarg1, dst);
      __ mov(c_rarg1, count);
      __ mov(c_rarg0, dst);
    } else {
      assert_different_registers(c_rarg0, count);
      __ mov(c_rarg0, dst);
      __ mov(c_rarg1, count);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
#else
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry),
                    dst, count);
#endif
    __ popa();

    __ bind(done);
    NOT_LP64(__ pop(thread);)
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
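  //
  // The barrier itself is structured as a fast path and a slow path:
  //  1. Do nothing unless gc_state says marking (or traversal) is in progress.
  //  2. Load the previous value (when obj is given) and bail out if it is null.
  //  3. Try to enqueue the previous value into the thread-local SATB buffer by
  //     moving the queue index down one word.
  //  4. If the buffer is full (index == 0), call into the runtime instead.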

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp + frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
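  // On 32-bit x86 there is no such register shuffle to worry about: the
  // NOT_LP64 paths below pass the arguments on the stack instead.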
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
  assert(ShenandoahCASBarrier, "should be enabled");
  Label is_null;
  __ testptr(dst, dst);
  __ jcc(Assembler::zero, is_null);
  resolve_forward_pointer_not_null(masm, dst);
  __ bind(is_null);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
  assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
  // Load the Brooks forwarding pointer stored just before the object: this
  // yields the to-space copy, or the object itself if it is not forwarded.
  __ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}

void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
#ifdef _LP64
  Label done;

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ jccb(Assembler::zero, done);

  // Heap is unstable, need to perform the resolve even if LRB is inactive
  resolve_forward_pointer_not_null(masm, dst);

  __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ jccb(Assembler::zero, done);

  if (dst != rax) {
    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
  }

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));

  if (dst != rax) {
    __ xchgptr(rax, dst); // Swap back obj with rax.
  }

  __ bind(done);
#else
  Unimplemented();
#endif
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    storeval_barrier_impl(masm, dst, tmp);
  }
}

void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");

  if (dst == noreg) return;

#ifdef _LP64
  if (ShenandoahStoreValEnqueueBarrier) {
    // The set of registers to be saved+restored is the same as in the write-barrier above.
    // Those are the commonly used registers in the interpreter.
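    // Note that pusha only covers the general-purpose registers; xmm0 can hold
    // a live float/double value at this point, so it is spilled separately below.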
    __ pusha();
    // __ push_callee_saved_registers();
    __ subptr(rsp, 2 * Interpreter::stackElementSize);
    __ movdbl(Address(rsp, 0), xmm0);

    satb_write_barrier_pre(masm, noreg, dst, r15_thread, tmp, true, false);
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    // __ pop_callee_saved_registers();
    __ popa();
  }
#else
  Unimplemented();
#endif
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahLoadRefBarrier) {
    Label done;
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, done);
    load_reference_barrier_not_null(masm, dst);
    __ bind(done);
  }
}

void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop) {
    load_reference_barrier(masm, dst);

    if (ShenandoahKeepAliveBarrier && on_reference) {
      const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
      NOT_LP64(__ get_thread(thread));
      // Generate the SATB pre-barrier code to log the value of
      // the referent field in an SATB buffer.
      shenandoah_write_barrier_pre(masm /* masm */,
                                   noreg /* obj */,
                                   dst /* pre_val */,
                                   thread /* thread */,
                                   tmp1 /* tmp */,
                                   true /* tosca_live */,
                                   true /* expand_call */);
    }
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {

  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

#ifndef _LP64
    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
#endif

    NOT_LP64(__ get_thread(rcx));
    NOT_LP64(imasm->save_bcp());

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /* masm */,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
    } else {
      storeval_barrier(masm, val, tmp3);
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
    }
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
  }
}
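
// TLAB allocation, Shenandoah-style: in this version of Shenandoah every object
// carries a Brooks forwarding pointer in an extra word immediately preceding it.
// The fast path below therefore reserves ShenandoahBrooksPointer::byte_size()
// extra bytes, bumps obj past the forwarding-pointer word, and points the
// forwarding pointer back at the object itself (meaning "not forwarded").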
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                                  Register thread, Register obj,
                                                  Register var_size_in_bytes,
                                                  int con_size_in_bytes,
                                                  Register t1, Register t2,
                                                  Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes + ShenandoahBrooksPointer::byte_size()));
  } else {
    __ addptr(var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // Initialize the Brooks pointer
#ifdef _LP64
  __ incrementq(obj, ShenandoahBrooksPointer::byte_size());
#else
  __ incrementl(obj, ShenandoahBrooksPointer::byte_size());
#endif
  __ movptr(Address(obj, ShenandoahBrooksPointer::byte_offset()), obj);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
#ifndef _LP64
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  // Shenandoah has no 32-bit version for this.
  Unimplemented();
}
#else
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");

  Label retry, done;

  // Remember oldval for the retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Try to CAS with the given arguments. If successful, then we are done,
  // and can safely return.
  if (os::is_MP()) __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, done, true);

  // Step 2. The CAS failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve both
  // oldval and the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1);
  }
  resolve_forward_pointer(masm, tmp1);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }
  resolve_forward_pointer(masm, tmp2);

  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, done, true);
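
  // At this point tmp1 holds the resolved (to-space) copy of oldval; it is
  // reused as the comparison anchor on every retry, so only the freshly
  // observed failure witness (tmp2) needs re-resolving in the loop below.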
  // Step 3. Try to CAS again with the resolved to-space pointers.
  //
  // Corner case: it may happen that somebody stored the from-space pointer
  // to memory while we were preparing for retry. Therefore, we can fail again
  // on retry, and so need to do this in a loop, always resolving the failure
  // witness.
  __ bind(retry);
  if (os::is_MP()) __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, done, true);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }
  resolve_forward_pointer(masm, tmp2);

  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::equal, retry, true);

  // Step 4. If we need a boolean result out of CAS, check the flag again,
  // and promote the result. Note that we handle the flag from both the CAS
  // itself and from the retry loop.
  __ bind(done);
  if (!exchange) {
    assert(res != NULL, "need result register");
    __ setb(Assembler::equal, res);
    __ movzbl(res, res);
  }
}
#endif // _LP64

void ShenandoahBarrierSetAssembler::save_vector_registers(MacroAssembler* masm) {
  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
  if (UseAVX > 2) {
    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
  }

  if (UseSSE == 1) {
    __ subptr(rsp, sizeof(jdouble)*8);
    for (int n = 0; n < 8; n++) {
      __ movflt(Address(rsp, n*sizeof(jdouble)), as_XMMRegister(n));
    }
  } else if (UseSSE >= 2) {
    if (UseAVX > 2) {
      __ push(rbx);
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
      __ pop(rbx);
    }
#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      if (UseAVX > 2) {
        // Save upper half of ZMM registers
        __ subptr(rsp, 32*num_xmm_regs);
        for (int n = 0; n < num_xmm_regs; n++) {
          __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
        }
      }
      assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
      // Save upper half of YMM registers
      __ subptr(rsp, 16*num_xmm_regs);
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
      }
    }
#endif
    // Save whole 128bit (16 bytes) XMM registers
    __ subptr(rsp, 16*num_xmm_regs);
#ifdef _LP64
    if (VM_Version::supports_evex()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf32x4(Address(rsp, n*16), as_XMMRegister(n), 0);
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
      }
    }
#else
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
    }
#endif
  }
}
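
// restore_vector_registers() below undoes the layout above in reverse order.
// Both helpers bracket the slow-path runtime call in the LRB stub further down,
// since that call may clobber vector registers that compiled code keeps live.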

void ShenandoahBarrierSetAssembler::restore_vector_registers(MacroAssembler* masm) {
  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
  if (UseAVX > 2) {
    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
  }
  if (UseSSE == 1) {
    for (int n = 0; n < 8; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, n*sizeof(jdouble)));
    }
    __ addptr(rsp, sizeof(jdouble)*8);
  } else if (UseSSE >= 2) {
    // Restore whole 128bit (16 bytes) XMM registers
#ifdef _LP64
    if (VM_Version::supports_evex()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf32x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, n*16), 0);
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
      }
    }
#else
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
    }
#endif
    __ addptr(rsp, 16*num_xmm_regs);

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Restore upper half of YMM registers.
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
      }
      __ addptr(rsp, 16*num_xmm_regs);
      if (UseAVX > 2) {
        // Restore upper half of ZMM registers.
        for (int n = 0; n < num_xmm_regs; n++) {
          __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
        }
        __ addptr(rsp, 32*num_xmm_regs);
      }
    }
#endif
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  __ bind(*stub->entry());

  Label done;
  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();

  if (res != obj) {
    __ mov(res, obj);
  }

  // Check for null.
  if (stub->needs_null_check()) {
    __ testptr(res, res);
    __ jcc(Assembler::zero, done);
  }

  load_reference_barrier_not_null(ce->masm(), res);

  __ bind(done);
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ jcc(Assembler::zero, done);

  // Can we store the original value in the thread's buffer?
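  // (Same fast-path enqueue as in satb_write_barrier_pre() above: move the
  // queue index down one word and store the previous value there.)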

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

#undef __

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
  assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
  return _shenandoah_lrb;
}

#define __ cgen->assembler()->

address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
  __ align(CodeEntryAlignment);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
  address start = __ pc();

#ifdef _LP64
  Label not_done;

  // We use RDI, which also serves as the argument register for the slow call.
  // RAX always holds the src object ptr, except after the slow call and
  // the cmpxchg, when it holds the result.
  // R8 and RCX are used as temporary registers.
  __ push(rdi);
  __ push(r8);

  // Check for the object being in the collection set.
  // TODO: Can we use only 1 register here?
  // The source object arrives here in rax.
  // live: rax
  // live: rdi
  __ mov(rdi, rax);
  __ shrptr(rdi, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  // live: r8
  __ movptr(r8, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
  __ movbool(r8, Address(r8, rdi, Address::times_1));
  // unlive: rdi
  __ testbool(r8);
  // unlive: r8
  __ jccb(Assembler::notZero, not_done);

  // Object is not in the collection set: return it unchanged.
  __ pop(r8);
  __ pop(rdi);
  __ ret(0);

  __ bind(not_done);

  // Slow path: save the remaining caller-saved state and call into the runtime.
  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);
  __ push(r12);
  __ push(r13);
  __ push(r14);
  __ push(r15);
  save_vector_registers(cgen->assembler());
  __ movptr(rdi, rax);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
  restore_vector_registers(cgen->assembler());
  __ pop(r15);
  __ pop(r14);
  __ pop(r13);
  __ pop(r12);
  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

  __ pop(r8);
  __ pop(rdi);
  __ ret(0);
#else
  ShouldNotReachHere();
#endif
  return start;
}

#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahLoadRefBarrier) {
    int stub_code_size = 4096;
    ResourceMark rm;
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_lrb = generate_shenandoah_lrb(&cgen);
  }
}