8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "registerSaver_s390.hpp"
28 #include "gc/g1/g1CardTable.hpp"
29 #include "gc/g1/g1BarrierSet.hpp"
30 #include "gc/g1/g1BarrierSetAssembler.hpp"
31 #include "gc/g1/g1ThreadLocalData.hpp"
32 #include "gc/g1/heapRegion.hpp"
33 #include "interpreter/interp_masm.hpp"
34 #include "runtime/sharedRuntime.hpp"
35
36 #define __ masm->
37
38 #define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
39
40 void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
41 Register addr, Register count) {
42 bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
43
44 // With G1, don't generate the call if we statically know that the target is uninitialized.
45 if (!dest_uninitialized) {
46 // Is marking active?
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "registerSaver_s390.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "gc/g1/c1/g1BarrierSetC1.hpp"
31 #include "gc/g1/g1CardTable.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1BarrierSetAssembler.hpp"
34 #include "gc/g1/g1ThreadLocalData.hpp"
35 #include "gc/g1/heapRegion.hpp"
36 #include "interpreter/interp_masm.hpp"
37 #include "runtime/sharedRuntime.hpp"
38
39 #define __ masm->
40
41 #define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
42
43 void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
44 Register addr, Register count) {
45 bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
46
47 // With G1, don't generate the call if we statically know that the target is uninitialized.
48 if (!dest_uninitialized) {
49 // Is marking active?
|
386 g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
387 }
388 }
389
// Resolve a jobject (JNI handle) in 'value' into the oop it refers to,
// in place. A NULL handle resolves to NULL. For jweak-tagged handles the
// loaded referent is a phantom-strength read, so it must be announced to
// the G1 SATB pre-barrier to keep it alive during concurrent marking.
// Kills: tmp1, tmp2. 'value' is updated in place.
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);      // Keep a tagged copy of the handle; also sets CC for the NULL check.
  __ z_bre(Ldone);             // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);  // Clear the weak-tag bit before dereferencing.
  __ z_lg(value, 0, value);    // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask);    // Test for jweak tag on the saved tagged copy.
  __ z_braz(Lnot_weak);
  __ verify_oop(value);
  DecoratorSet decorators = IN_ROOT | ON_PHANTOM_OOP_REF;
  // Pre-barrier records the phantom-loaded referent so concurrent marking sees it.
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value);
  __ bind(Ldone);
}
407
408 #undef __
|
389 g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
390 }
391 }
392
// Resolve a jobject (JNI handle) in 'value' into the oop it refers to,
// in place. A NULL handle resolves to NULL. For jweak-tagged handles the
// loaded referent is a phantom-strength read, so it must be announced to
// the G1 SATB pre-barrier to keep it alive during concurrent marking.
// Kills: tmp1, tmp2. 'value' is updated in place.
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);      // Keep a tagged copy of the handle; also sets CC for the NULL check.
  __ z_bre(Ldone);             // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);  // Clear the weak-tag bit before dereferencing.
  __ z_lg(value, 0, value);    // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask);    // Test for jweak tag on the saved tagged copy.
  __ z_braz(Lnot_weak);
  __ verify_oop(value);
  DecoratorSet decorators = IN_ROOT | ON_PHANTOM_OOP_REF;
  // Pre-barrier records the phantom-loaded referent so concurrent marking sees it.
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value);
  __ bind(Ldone);
}
410
411 #undef __
412 #define __ ce->masm()->
413
// C1: emit the out-of-line slow path for a G1 pre-barrier stub.
// Loads the previous value if the stub requests it, then calls the
// pre-barrier runtime stub with the previous value in Z_R1_scratch.
// A NULL previous value is filtered here and skips the runtime call.
void G1BarrierSetAssembler::gen_g1_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Also sets CC: a NULL previous value needs no barrier.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
435
// C1: emit the out-of-line slow path for a G1 post-barrier stub.
// Calls the post-barrier runtime stub with the updated slot address in
// Z_R1_scratch. Stores of NULL are filtered here and skip the call.
void G1BarrierSetAssembler::gen_g1_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg); // Sets CC: no barrier needed when storing NULL.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  // Pass the address of the updated slot in Z_R1_scratch to the runtime stub.
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
449
450 #undef __
451
452 #define __ sasm->
453
454 static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
455 __ block_comment("save_volatile_registers");
456 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
457 int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
458 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
459 return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
460 }
461
462 static void restore_volatile_registers(StubAssembler* sasm) {
463 __ block_comment("restore_volatile_registers");
464 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
465 RegisterSaver::restore_live_registers(sasm, reg_set);
466 }
467
468 void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
469 // Z_R1_scratch: previous value of memory
470
471 BarrierSet* bs = BarrierSet::barrier_set();
472 __ set_info("g1_pre_barrier_slow_id", false);
473
474 Register pre_val = Z_R1_scratch;
475 Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
476 Register tmp2 = Z_R7;
477
478 Label refill, restart, marking_not_active;
479 int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
480 int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
481 int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
482
483 // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
484 __ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
485 __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
486
487 // Is marking still active?
488 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
489 __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
490 } else {
491 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
492 __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
493 }
494 __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
495
496 __ bind(restart);
497 // Load the index into the SATB buffer. SATBMarkQueue::_index is a
498 // size_t so ld_ptr is appropriate.
499 __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
500
501 // index == 0?
502 __ z_brz(refill);
503
504 __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
505 __ add2reg(tmp, -oopSize);
506
507 __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
508 __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
509
510 __ bind(marking_not_active);
511 // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
512 __ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
513 __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
514 __ z_br(Z_R14);
515
516 __ bind(refill);
517 save_volatile_registers(sasm);
518 __ z_lgr(tmp, pre_val); // save pre_val
519 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
520 Z_thread);
521 __ z_lgr(pre_val, tmp); // restore pre_val
522 restore_volatile_registers(sasm);
523 __ z_bru(restart);
524 }
525
// C1 runtime stub for the G1 post-barrier (card marking) slow path.
// Input:  Z_R1_scratch - address of the updated memory slot.
// Dirties the card covering the slot (unless it is young or already
// dirty) and enqueues the card address on the thread-local dirty-card
// queue, refilling the buffer via the runtime when it is full. The
// non-volatile temps used here are spilled to the caller's reserved
// frame area (see assertion in G1PostBarrierStub::emit_code()).
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop = Z_R1_scratch;
  Register addr_card = Z_R1_scratch; // Reuses the same register: addr_oop is consumed by the shift below.
  Register r1 = Z_R6; // Must be saved/restored.
  Register r2 = Z_R7; // Must be saved/restored.
  Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  jbyte* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate address of card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

  // Young cards never need refinement; skip straight to the return path.
  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: restore
  // used registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := 0
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0?
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize); // The queue is filled top-down.

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  // Buffer is full: have the runtime install a fresh one, then retry.
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card; idx (r1) is non-volatile and survives the call.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(addr_card, idx); // Restore addr_card.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}
611
612 #undef __
|