/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
    if (!checkcast && !obj_int) {
      // Save count for barrier
      __ movptr(r11, count);
    } else if (disjoint && obj_int) {
      // Save dst in r11 in the disjoint case
      __ movq(r11, dst);
    }
#else
    if (disjoint) {
      __ mov(rdx, dst); // save 'to'
    }
#endif

    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
      Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
      __ push(thread);
      __ get_thread(thread);
#endif

      Label done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, done);

      // Avoid runtime call when not marking.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ testb(gc_state, ShenandoahHeap::MARKING);
      __ jcc(Assembler::zero, done);

      __ pusha(); // push registers
#ifdef _LP64
      if (count == c_rarg0) {
        if (dst == c_rarg1) {
          // exactly backwards!!
          __ xchgptr(c_rarg1, c_rarg0);
        } else {
          __ movptr(c_rarg1, count);
          __ movptr(c_rarg0, dst);
        }
      } else {
        __ movptr(c_rarg0, dst);
        __ movptr(c_rarg1, count);
      }
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
#else
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry),
                      dst, count);
#endif
      __ popa();
      __ bind(done);
      NOT_LP64(__ pop(thread);)
    }
  }

}

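// The epilogue below mirrors the prologue: once the elements have been copied,
// and if the heap may still contain forwarded objects (UPDATEREFS is set in the
// thread-local gc_state), the copied range is handed to
// ShenandoahRuntime::write_ref_array_post_entry so the references in it can be
// updated. The same (dst, count) argument shuffling and the count == 0
// short-circuit apply as in the prologue.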
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {
  bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
  bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
  bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
  Register tmp = rax;

  if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
    if (!checkcast && !obj_int) {
      // Save count for barrier
      count = r11;
    } else if (disjoint && obj_int) {
      // Use the saved dst in the disjoint case
      dst = r11;
    } else if (checkcast) {
      tmp = rscratch1;
    }
#else
    if (disjoint) {
      __ mov(dst, rdx); // restore 'to'
    }
#endif

    Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
    __ push(thread);
    __ get_thread(thread);
#endif

    // Short-circuit if count == 0.
    Label done;
    __ testptr(count, count);
    __ jcc(Assembler::zero, done);

    // Skip runtime call if no forwarded objects.
    Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ testb(gc_state, ShenandoahHeap::UPDATEREFS);
    __ jcc(Assembler::zero, done);

    __ pusha(); // push registers (overkill)
#ifdef _LP64
    if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
      assert_different_registers(c_rarg1, dst);
      __ mov(c_rarg1, count);
      __ mov(c_rarg0, dst);
    } else {
      assert_different_registers(c_rarg0, count);
      __ mov(c_rarg0, dst);
      __ mov(c_rarg1, count);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
#else
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry),
                    dst, count);
#endif
    __ popa();

    __ bind(done);
    NOT_LP64(__ pop(thread);)
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

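// SATB pre-write barrier: records the previous value of a reference field so
// that concurrent marking does not lose it. The fast path below enqueues
// pre_val into the thread-local SATB buffer (index is decremented by wordSize
// and the value stored at buffer + index); when the buffer is full (index == 0)
// the slow path calls ShenandoahRuntime::write_ref_field_pre_entry. The whole
// barrier is skipped unless MARKING or TRAVERSAL is set in gc_state.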
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if(tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if(tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
  assert(ShenandoahCASBarrier, "should be enabled");
  Label is_null;
  __ testptr(dst, dst);
  __ jcc(Assembler::zero, is_null);
  resolve_forward_pointer_not_null(masm, dst);
  __ bind(is_null);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
  assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
  __ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}

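// Load-reference barrier (LRB) on a known non-null object. The barrier only
// does work when the heap is unstable (HAS_FORWARDED, EVACUATION or TRAVERSAL
// set in gc_state): it first resolves the Brooks forwarding pointer, and if
// evacuation or traversal is still in progress it calls the shenandoah_lrb
// stub, which takes its argument in rax and returns the (possibly evacuated)
// object in rax.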
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
#ifdef _LP64
  Label done;

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ jccb(Assembler::zero, done);

  // Heap is unstable, need to perform the resolve even if LRB is inactive
  resolve_forward_pointer_not_null(masm, dst);

  __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ jccb(Assembler::zero, done);

  if (dst != rax) {
    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
  }

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));

  if (dst != rax) {
    __ xchgptr(rax, dst); // Swap back obj with rax.
  }

  __ bind(done);
#else
  Unimplemented();
#endif
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    storeval_barrier_impl(masm, dst, tmp);
  }
}

void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");

  if (dst == noreg) return;

#ifdef _LP64
  if (ShenandoahStoreValEnqueueBarrier) {
    // The set of registers to be saved+restored is the same as in the write-barrier above.
    // Those are the commonly used registers in the interpreter.
    __ pusha();
    // __ push_callee_saved_registers();
    __ subptr(rsp, 2 * Interpreter::stackElementSize);
    __ movdbl(Address(rsp, 0), xmm0);

    satb_write_barrier_pre(masm, noreg, dst, r15_thread, tmp, true, false);
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    //__ pop_callee_saved_registers();
    __ popa();
  }
#else
  Unimplemented();
#endif
}

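// Null-checking wrapper around the barrier above, followed by the load_at /
// store_at hooks that wire the Shenandoah barriers into the generic
// BarrierSetAssembler access paths.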
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahLoadRefBarrier) {
    Label done;
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, done);
    load_reference_barrier_not_null(masm, dst);
    __ bind(done);
  }
}

void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop) {
    load_reference_barrier(masm, dst);

    if (ShenandoahKeepAliveBarrier && on_reference) {
      const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
      NOT_LP64(__ get_thread(thread));
      // Generate the SATB pre-barrier code to log the value of
      // the referent field in an SATB buffer.
      shenandoah_write_barrier_pre(masm /* masm */,
                                   noreg /* obj */,
                                   dst /* pre_val */,
                                   thread /* thread */,
                                   tmp1 /* tmp */,
                                   true /* tosca_live */,
                                   true /* expand_call */);
    }
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {

  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

#ifndef _LP64
    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
#endif

    NOT_LP64(__ get_thread(rcx));
    NOT_LP64(imasm->save_bcp());

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
    } else {
      storeval_barrier(masm, val, tmp3);
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg);
    }
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
  }
}

void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                                  Register thread, Register obj,
                                                  Register var_size_in_bytes,
                                                  int con_size_in_bytes,
                                                  Register t1, Register t2,
                                                  Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes + ShenandoahBrooksPointer::byte_size()));
  } else {
    __ addptr(var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // Initialize brooks pointer
#ifdef _LP64
  __ incrementq(obj, ShenandoahBrooksPointer::byte_size());
#else
  __ incrementl(obj, ShenandoahBrooksPointer::byte_size());
#endif
  __ movptr(Address(obj, ShenandoahBrooksPointer::byte_offset()), obj);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
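//
// An illustrative sketch of the retry scheme implemented below (pseudocode
// only, not the generated code; resolve() stands for resolve_forward_pointer):
//
//   if (CAS(addr, expected, newval)) return success;
//   witness = *addr;                                           // left in rax by the failed CAS
//   if (resolve(expected) != resolve(witness)) return failure; // legitimate mismatch
//   do {                                                       // false negative: retry using the witness
//     if (CAS(addr, witness, newval)) return success;
//     witness = *addr;
//   } while (resolve(witness) == resolve(expected));
//   return failure;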
#ifndef _LP64
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  // Shenandoah has no 32-bit version for this.
  Unimplemented();
}
#else
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");

  Label retry, done;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Try to CAS with given arguments. If successful, then we are done,
  // and can safely return.
  if (os::is_MP()) __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, done, true);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve both
  // oldval and the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1);
  }
  resolve_forward_pointer(masm, tmp1);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }
  resolve_forward_pointer(masm, tmp2);

  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, done, true);

  // Step 3. Try to CAS again with resolved to-space pointers.
  //
  // Corner case: it may happen that somebody stored the from-space pointer
  // to memory while we were preparing for retry. Therefore, we can fail again
  // on retry, and so need to do this in a loop, always resolving the failure
  // witness.
  __ bind(retry);
  if (os::is_MP()) __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, done, true);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }
  resolve_forward_pointer(masm, tmp2);

  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::equal, retry, true);

  // Step 4. If we need a boolean result out of CAS, check the flag again,
  // and promote the result. Note that we handle the flag from both the CAS
  // itself and from the retry loop.
  __ bind(done);
  if (!exchange) {
    assert(res != NULL, "need result register");
    __ setb(Assembler::equal, res);
    __ movzbl(res, res);
  }
}
#endif // LP64

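// The shenandoah_lrb stub (generated further down) calls into the runtime, so
// any live FP/vector state has to be preserved across it. The two helpers
// below spill and reload the XMM/YMM/ZMM registers, sized according to
// UseSSE, UseAVX and MaxVectorSize.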
void ShenandoahBarrierSetAssembler::save_vector_registers(MacroAssembler* masm) {
  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
  if (UseAVX > 2) {
    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
  }

  if (UseSSE == 1) {
    __ subptr(rsp, sizeof(jdouble)*8);
    for (int n = 0; n < 8; n++) {
      __ movflt(Address(rsp, n*sizeof(jdouble)), as_XMMRegister(n));
    }
  } else if (UseSSE >= 2) {
    if (UseAVX > 2) {
      __ push(rbx);
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
      __ pop(rbx);
    }
#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      if(UseAVX > 2) {
        // Save upper half of ZMM registers
        __ subptr(rsp, 32*num_xmm_regs);
        for (int n = 0; n < num_xmm_regs; n++) {
          __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
        }
      }
      assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
      // Save upper half of YMM registers
      __ subptr(rsp, 16*num_xmm_regs);
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
      }
    }
#endif
    // Save whole 128bit (16 bytes) XMM registers
    __ subptr(rsp, 16*num_xmm_regs);
#ifdef _LP64
    if (VM_Version::supports_evex()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf32x4(Address(rsp, n*16), as_XMMRegister(n), 0);
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
      }
    }
#else
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(Address(rsp, n*16), as_XMMRegister(n));
    }
#endif
  }
}

void ShenandoahBarrierSetAssembler::restore_vector_registers(MacroAssembler* masm) {
  int num_xmm_regs = LP64_ONLY(16) NOT_LP64(8);
  if (UseAVX > 2) {
    num_xmm_regs = LP64_ONLY(32) NOT_LP64(8);
  }
  if (UseSSE == 1) {
    for (int n = 0; n < 8; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, n*sizeof(jdouble)));
    }
    __ addptr(rsp, sizeof(jdouble)*8);
  } else if (UseSSE >= 2) {
    // Restore whole 128bit (16 bytes) XMM registers
#ifdef _LP64
    if (VM_Version::supports_evex()) {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf32x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, n*16), 0);
      }
    } else {
      for (int n = 0; n < num_xmm_regs; n++) {
        __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
      }
    }
#else
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(as_XMMRegister(n), Address(rsp, n*16));
    }
#endif
    __ addptr(rsp, 16*num_xmm_regs);

#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Restore upper half of YMM registers.
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
      }
      __ addptr(rsp, 16*num_xmm_regs);
      if (UseAVX > 2) {
        for (int n = 0; n < num_xmm_regs; n++) {
          __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
        }
        __ addptr(rsp, 32*num_xmm_regs);
      }
    }
#endif
  }
}

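// C1 support: out-of-line stubs for the SATB pre-barrier and the
// load-reference barrier (see shenandoahBarrierSetC1), plus the shared
// runtime stub the pre-barrier stub calls into. Only built when the client
// compiler is included.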
#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());

}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  __ bind(*stub->entry());

  Label done;
  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();

  if (res != obj) {
    __ mov(res, obj);
  }

  // Check for null.
  if (stub->needs_null_check()) {
    __ testptr(res, res);
    __ jcc(Assembler::zero, done);
  }

  load_reference_barrier_not_null(ce->masm(), res);

  __ bind(done);
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

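// Shared C1 runtime stub for the pre-barrier: it re-checks that marking (or
// traversal) is still active, tries the same SATB-buffer fast path as the
// interpreter barrier, and otherwise saves the live registers and calls
// ShenandoahRuntime::write_ref_field_pre_entry with the previous value.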
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

#undef __

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
  assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
  return _shenandoah_lrb;
}

#define __ cgen->assembler()->

address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
  __ align(CodeEntryAlignment);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
  address start = __ pc();

#ifdef _LP64
  Label not_done;

  // We use RDI, which also serves as argument register for slow call.
  // RAX always holds the src object ptr, except after the slow call and
  // the cmpxchg, then it holds the result.
  // R8 and RCX are used as temporary registers.
  __ push(rdi);
  __ push(r8);

  // Check for object being in the collection set.
  // TODO: Can we use only 1 register here?
  // The source object arrives here in rax.
  // live: rax
  // live: rdi
  __ mov(rdi, rax);
  __ shrptr(rdi, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  // live: r8
  __ movptr(r8, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
  __ movbool(r8, Address(r8, rdi, Address::times_1));
  // unlive: rdi
  __ testbool(r8);
  // unlive: r8
  __ jccb(Assembler::notZero, not_done);

  __ pop(r8);
  __ pop(rdi);
  __ ret(0);

  __ bind(not_done);

  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);
  __ push(r12);
  __ push(r13);
  __ push(r14);
  __ push(r15);
  save_vector_registers(cgen->assembler());
  __ movptr(rdi, rax);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
  restore_vector_registers(cgen->assembler());
  __ pop(r15);
  __ pop(r14);
  __ pop(r13);
  __ pop(r12);
  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

  __ pop(r8);
  __ pop(rdi);
  __ ret(0);
#else
  ShouldNotReachHere();
#endif
  return start;
}

#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahLoadRefBarrier) {
    int stub_code_size = 4096;
    ResourceMark rm;
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_lrb = generate_shenandoah_lrb(&cgen);
  }
}