24 #include "precompiled.hpp"
25 #include "asm/macroAssembler.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "ci/ciUtilities.hpp"
31 #include "gc/shared/cardTable.hpp"
32 #include "gc/shared/cardTableBarrierSet.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "nativeInst_arm.hpp"
35 #include "oops/compiledICHolder.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "register_arm.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/signature.hpp"
41 #include "runtime/vframeArray.hpp"
42 #include "utilities/align.hpp"
43 #include "vmreg_arm.inline.hpp"
44 #if INCLUDE_ALL_GCS
45 #include "gc/g1/g1BarrierSet.hpp"
46 #include "gc/g1/g1CardTable.hpp"
47 #include "gc/g1/g1ThreadLocalData.hpp"
48 #endif
49
50 // Note: Rtemp usage in this file should not impact C2 and should be
51 // correct as long as it is not implicitly used in lower layers (the
52 // arm [macro]assembler) and used with care in the other C1 specific
53 // files.
54
55 // Implementation of StubAssembler
56
57 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
58 mov(R0, Rthread);
59
60 int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);
61
62 call(entry);
63 if (call_offset == -1) { // PC not saved
64 call_offset = offset();
65 }
66 reset_last_Java_frame(Rtemp);
67
|
24 #include "precompiled.hpp"
25 #include "asm/macroAssembler.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "ci/ciUtilities.hpp"
31 #include "gc/shared/cardTable.hpp"
32 #include "gc/shared/cardTableBarrierSet.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "nativeInst_arm.hpp"
35 #include "oops/compiledICHolder.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "register_arm.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/signature.hpp"
41 #include "runtime/vframeArray.hpp"
42 #include "utilities/align.hpp"
43 #include "vmreg_arm.inline.hpp"
44
45 // Note: Rtemp usage in this file should not impact C2 and should be
46 // correct as long as it is not implicitly used in lower layers (the
47 // arm [macro]assembler) and used with care in the other C1 specific
48 // files.
49
50 // Implementation of StubAssembler
51
52 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
53 mov(R0, Rthread);
54
55 int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);
56
57 call(entry);
58 if (call_offset == -1) { // PC not saved
59 call_offset = offset();
60 }
61 reset_last_Java_frame(Rtemp);
62
|
338
339
// Restore the saved live-register set but leave R0 untouched, so a stub
// can return a result in R0.  The bool flags passed to the 5-arg overload
// (defined outside this chunk) appear to be (restore_R0, restore_FP_LR,
// do_return, restore_fpu_registers) — confirm against its declaration.
340 static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
341 restore_live_registers(sasm, false, true, true, restore_fpu_registers);
342 }
343
// Restore the complete live-register set (all flags true: presumably R0,
// FP/LR, and the final return are all handled — confirm against the 5-arg
// overload, which is defined outside this chunk).
344 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
345 restore_live_registers(sasm, true, true, true, restore_fpu_registers);
346 }
347
// AArch32-only variant: restore everything except FP/LR and skip the
// return (third and fourth flags false), presumably for stubs that still
// need the current frame — confirm against the 5-arg overload, which is
// defined outside this chunk.
348 #ifndef AARCH64
349 static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
350 restore_live_registers(sasm, true, false, false, restore_fpu_registers);
351 }
352 #endif // !AARCH64
353
// Restore the full live-register set but do not emit the stub's return —
// the flag pattern differs from restore_live_registers() only in the
// fourth argument; confirm the flag meaning against the 5-arg overload,
// which is defined outside this chunk.
354 static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
355 restore_live_registers(sasm, true, true, false, restore_fpu_registers);
356 }
357
358
// Platform-dependent part of Runtime1 initialization: nothing to do on ARM.
359 void Runtime1::initialize_pd() {
360 }
361
362
// Generate a stub that throws an exception by calling the runtime entry
// 'target'.  Live registers are saved first and an oop map is recorded at
// the call offset so GC can walk the stub frame.  If 'has_argument', the
// single argument is reloaded from the caller's stack slot at
// SP + arg1_offset (arg1_offset is defined elsewhere in this file) into
// R1; R0 is set to Rthread inside call_RT itself.  The runtime target is
// expected to unwind (throw) rather than return, hence the debug-only
// STOP after the call.
363 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
364 OopMap* oop_map = save_live_registers(sasm);
365
366 if (has_argument) {
367 __ ldr(R1, Address(SP, arg1_offset));
368 }
369
370 int call_offset = __ call_RT(noreg, noreg, target);
371 OopMapSet* oop_maps = new OopMapSet();
372 oop_maps->add_gc_map(call_offset, oop_map);
373
374 DEBUG_ONLY(STOP("generate_exception_throw");) // Should not reach here
375 return oop_maps;
376 }
|
333
334
// Restore the saved live-register set but leave R0 untouched, so a stub
// can return a result in R0.  The bool flags passed to the 5-arg overload
// (defined outside this chunk) appear to be (restore_R0, restore_FP_LR,
// do_return, restore_fpu_registers) — confirm against its declaration.
335 static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
336 restore_live_registers(sasm, false, true, true, restore_fpu_registers);
337 }
338
// Restore the complete live-register set (all flags true: presumably R0,
// FP/LR, and the final return are all handled — confirm against the 5-arg
// overload, which is defined outside this chunk).
339 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
340 restore_live_registers(sasm, true, true, true, restore_fpu_registers);
341 }
342
// AArch32-only variant: restore everything except FP/LR and skip the
// return (third and fourth flags false), presumably for stubs that still
// need the current frame — confirm against the 5-arg overload, which is
// defined outside this chunk.
343 #ifndef AARCH64
344 static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
345 restore_live_registers(sasm, true, false, false, restore_fpu_registers);
346 }
347 #endif // !AARCH64
348
// Restore the full live-register set but do not emit the stub's return —
// the flag pattern differs from restore_live_registers() only in the
// fourth argument; confirm the flag meaning against the 5-arg overload,
// which is defined outside this chunk.
349 static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
350 restore_live_registers(sasm, true, true, false, restore_fpu_registers);
351 }
352
// Member-function wrapper delegating to the free save_live_registers()
// helper (defined earlier in this file), so callers outside this file can
// trigger a full live-register save on this assembler.
353 void StubAssembler::save_live_registers() {
354 save_live_registers(this);
355 }
356
// Member-function wrapper delegating to the file-local
// restore_live_registers_without_return() helper, exposing it to callers
// outside this file.
357 void StubAssembler::restore_live_registers_without_return() {
358 restore_live_registers_without_return(this);
359 }
360
// Platform-dependent part of Runtime1 initialization: nothing to do on ARM.
361 void Runtime1::initialize_pd() {
362 }
363
364
// Generate a stub that throws an exception by calling the runtime entry
// 'target'.  Live registers are saved first and an oop map is recorded at
// the call offset so GC can walk the stub frame.  If 'has_argument', the
// single argument is reloaded from the caller's stack slot at
// SP + arg1_offset (arg1_offset is defined elsewhere in this file) into
// R1; R0 is set to Rthread inside call_RT itself.  The runtime target is
// expected to unwind (throw) rather than return, hence the debug-only
// STOP after the call.
365 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
366 OopMap* oop_map = save_live_registers(sasm);
367
368 if (has_argument) {
369 __ ldr(R1, Address(SP, arg1_offset));
370 }
371
372 int call_offset = __ call_RT(noreg, noreg, target);
373 OopMapSet* oop_maps = new OopMapSet();
374 oop_maps->add_gc_map(call_offset, oop_map);
375
376 DEBUG_ONLY(STOP("generate_exception_throw");) // Should not reach here
377 return oop_maps;
378 }
|
515 DEBUG_ONLY(STOP("generate_patching");) // Should not reach here
516 return oop_maps;
517 }
518
519
520 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
521 const bool must_gc_arguments = true;
522 const bool dont_gc_arguments = false;
523
524 OopMapSet* oop_maps = NULL;
525 bool save_fpu_registers = HaveVFP;
526
527 switch (id) {
528 case forward_exception_id:
529 {
530 oop_maps = generate_handle_exception(id, sasm);
531 // does not return on ARM
532 }
533 break;
534
535 #if INCLUDE_ALL_GCS
536 case g1_pre_barrier_slow_id:
537 {
538 // Input:
539 // - pre_val pushed on the stack
540
541 __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
542
543 BarrierSet* bs = BarrierSet::barrier_set();
544 if (bs->kind() != BarrierSet::G1BarrierSet) {
545 __ mov(R0, (int)id);
546 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
547 __ should_not_reach_here();
548 break;
549 }
550
551 // save at least the registers that need saving if the runtime is called
552 #ifdef AARCH64
553 __ raw_push(R0, R1);
554 __ raw_push(R2, R3);
555 const int nb_saved_regs = 4;
556 #else // AARCH64
557 const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
558 const int nb_saved_regs = 6;
559 assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
560 __ push(saved_regs);
561 #endif // AARCH64
562
563 const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
564 const Register r_index_1 = R1;
565 const Register r_buffer_2 = R2;
566
567 Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
568 Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
569 Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
570
571 Label done;
572 Label runtime;
573
574 // Is marking still active?
575 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
576 __ ldrb(R1, queue_active);
577 __ cbz(R1, done);
578
579 __ ldr(r_index_1, queue_index);
580 __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
581 __ ldr(r_buffer_2, buffer);
582
583 __ subs(r_index_1, r_index_1, wordSize);
584 __ b(runtime, lt);
585
586 __ str(r_index_1, queue_index);
587 __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
588
589 __ bind(done);
590
591 #ifdef AARCH64
592 __ raw_pop(R2, R3);
593 __ raw_pop(R0, R1);
594 #else // AARCH64
595 __ pop(saved_regs);
596 #endif // AARCH64
597
598 __ ret();
599
600 __ bind(runtime);
601
602 save_live_registers(sasm);
603
604 assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
605 __ mov(c_rarg1, Rthread);
606 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
607
608 restore_live_registers_without_return(sasm);
609
610 __ b(done);
611 }
612 break;
613 case g1_post_barrier_slow_id:
614 {
615 // Input:
616 // - store_addr, pushed on the stack
617
618 __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
619
620 BarrierSet* bs = BarrierSet::barrier_set();
621 if (bs->kind() != BarrierSet::G1BarrierSet) {
622 __ mov(R0, (int)id);
623 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
624 __ should_not_reach_here();
625 break;
626 }
627
628 Label done;
629 Label recheck;
630 Label runtime;
631
632 Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
633 Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
634
635 AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
636
637 // save at least the registers that need saving if the runtime is called
638 #ifdef AARCH64
639 __ raw_push(R0, R1);
640 __ raw_push(R2, R3);
641 const int nb_saved_regs = 4;
642 #else // AARCH64
643 const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
644 const int nb_saved_regs = 6;
645 assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
646 __ push(saved_regs);
647 #endif // AARCH64
648
649 const Register r_card_addr_0 = R0; // must be R0 for the slow case
650 const Register r_obj_0 = R0;
651 const Register r_card_base_1 = R1;
652 const Register r_tmp2 = R2;
653 const Register r_index_2 = R2;
654 const Register r_buffer_3 = R3;
655 const Register tmp1 = Rtemp;
656
657 __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
658 // Note: there is a comment in x86 code about not using
659 // ExternalAddress / lea, due to relocation not working
660 // properly for that address. Should be OK for arm, where we
661 // explicitly specify that 'cardtable' has a relocInfo::none
662 // type.
663 __ lea(r_card_base_1, cardtable);
664 __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
665
666 // first quick check without barrier
667 __ ldrb(r_tmp2, Address(r_card_addr_0));
668
669 __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
670 __ b(recheck, ne);
671
672 __ bind(done);
673
674 #ifdef AARCH64
675 __ raw_pop(R2, R3);
676 __ raw_pop(R0, R1);
677 #else // AARCH64
678 __ pop(saved_regs);
679 #endif // AARCH64
680
681 __ ret();
682
683 __ bind(recheck);
684
685 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
686
687 // reload card state after the barrier that ensures the stored oop was visible
688 __ ldrb(r_tmp2, Address(r_card_addr_0));
689
690 assert(CardTable::dirty_card_val() == 0, "adjust this code");
691 __ cbz(r_tmp2, done);
692
693 // storing region crossing non-NULL, card is clean.
694 // dirty card and log.
695
696 assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
697 if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
698 // Card table is aligned so the lowest byte of the table address base is zero.
699 __ strb(r_card_base_1, Address(r_card_addr_0));
700 } else {
701 __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
702 }
703
704 __ ldr(r_index_2, queue_index);
705 __ ldr(r_buffer_3, buffer);
706
707 __ subs(r_index_2, r_index_2, wordSize);
708 __ b(runtime, lt); // go to runtime if now negative
709
710 __ str(r_index_2, queue_index);
711
712 __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
713
714 __ b(done);
715
716 __ bind(runtime);
717
718 save_live_registers(sasm);
719
720 assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
721 __ mov(c_rarg1, Rthread);
722 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
723
724 restore_live_registers_without_return(sasm);
725
726 __ b(done);
727 }
728 break;
729 #endif // INCLUDE_ALL_GCS
730 case new_instance_id:
731 case fast_new_instance_id:
732 case fast_new_instance_init_check_id:
733 {
734 const Register result = R0;
735 const Register klass = R1;
736
737 if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
738 // We come here when TLAB allocation failed.
739 // In this case we try to allocate directly from eden.
740 Label slow_case, slow_case_no_pop;
741
742 // Make sure the class is fully initialized
743 if (id == fast_new_instance_init_check_id) {
744 __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
745 __ cmp(result, InstanceKlass::fully_initialized);
746 __ b(slow_case_no_pop, ne);
747 }
748
|
517 DEBUG_ONLY(STOP("generate_patching");) // Should not reach here
518 return oop_maps;
519 }
520
521
522 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
523 const bool must_gc_arguments = true;
524 const bool dont_gc_arguments = false;
525
526 OopMapSet* oop_maps = NULL;
527 bool save_fpu_registers = HaveVFP;
528
529 switch (id) {
530 case forward_exception_id:
531 {
532 oop_maps = generate_handle_exception(id, sasm);
533 // does not return on ARM
534 }
535 break;
536
537 case new_instance_id:
538 case fast_new_instance_id:
539 case fast_new_instance_init_check_id:
540 {
541 const Register result = R0;
542 const Register klass = R1;
543
544 if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
545 // We come here when TLAB allocation failed.
546 // In this case we try to allocate directly from eden.
547 Label slow_case, slow_case_no_pop;
548
549 // Make sure the class is fully initialized
550 if (id == fast_new_instance_init_check_id) {
551 __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
552 __ cmp(result, InstanceKlass::fully_initialized);
553 __ b(slow_case_no_pop, ne);
554 }
555
|