26 #include "precompiled.hpp"
27 #include "asm/codeBuffer.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "gc/shared/cardTableModRefBS.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "opto/compile.hpp"
37 #include "opto/intrinsicnode.hpp"
38 #include "opto/matcher.hpp"
39 #include "prims/methodHandles.hpp"
40 #include "registerSaver_s390.hpp"
41 #include "runtime/biasedLocking.hpp"
42 #include "runtime/icache.hpp"
43 #include "runtime/interfaceSupport.hpp"
44 #include "runtime/objectMonitor.hpp"
45 #include "runtime/os.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "utilities/events.hpp"
49 #include "utilities/macros.hpp"
50 #if INCLUDE_ALL_GCS
51 #include "gc/g1/g1CollectedHeap.inline.hpp"
52 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
53 #include "gc/g1/heapRegion.hpp"
54 #endif
55
56 #include <ucontext.h>
57
58 #define BLOCK_COMMENT(str) block_comment(str)
59 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
60
61 // Move 32-bit register if destination and source are different.
62 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
63 if (rs != rd) { z_lr(rd, rs); }
64 }
65
2002 }
2003
2004 guarantee(false, "not a pcrelative instruction to patch!");
2005 }
2006
// Save the current PC in 'result' and return it.
// "Current PC" here means the address just behind the basr instruction.
address MacroAssembler::get_PC(Register result) {
  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
  return pc();          // Assembly-time address corresponding to the value put in 'result'.
}
2012
// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
// Materializes that address in 'result' at run time and returns the
// corresponding assembly-time address to the caller.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // LARL takes the offset in halfwords; save target instruction address in result.
  return here + offset;
}
2021
// Resize_frame with SP(new) = SP(old) - [offset].
// Grows the frame by 'offset' bytes (register operand). If load_fp is set,
// the frame pointer is first fetched from the callers_sp slot of the current
// frame; it is then stored into the callers_sp slot of the resized frame to
// re-establish the back link.
void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
{
  assert_different_registers(offset, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } // Fetch back link before Z_SP moves.

  z_sgr(Z_SP, offset);                 // Z_SP -= offset.
  z_stg(fp, _z_abi(callers_sp), Z_SP); // Store back link into the new frame.
}
2031
2032 // Resize_frame with SP(new) = [newSP] + offset.
2033 // This emitter is useful if we already have calculated a pointer
2034 // into the to-be-allocated stack space, e.g. with special alignment properties,
2035 // but need some additional space, e.g. for spilling.
2036 // newSP is the pre-calculated pointer. It must not be modified.
2037 // fp holds, or is filled with, the frame pointer.
2038 // offset is the additional increment which is added to addr to form the new SP.
2039 // Note: specify a negative value to reserve more space!
2040 // load_fp == true only indicates that fp is not pre-filled with the frame pointer.
2041 // It does not guarantee that fp contains the frame pointer at the end.
2688 // offset to write to within the page. This minimizes bus traffic
2689 // due to cache line collision.
// Serialize memory via a store into the global memory serialization page.
// The store offset within the page is derived from the thread pointer, so
// different threads hit different offsets (see leading comment: minimizes
// bus traffic due to cache line collision).
// tmp1 and tmp2 are scratch; thread may alias neither of them.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  assert_different_registers(tmp1, tmp2);
  z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); // Per-thread raw offset.
  load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); // Page base address.

  int mask = os::get_serialize_page_mask();
  if (Immediate::is_uimm16(mask)) {
    z_nill(tmp2, mask);   // Mask fits in 16 bits: AND low halfword.
    z_llghr(tmp2, tmp2);  // Zero-extend halfword result to 64 bits.
  } else {
    z_nilf(tmp2, mask);   // 32-bit mask: AND low word.
    z_llgfr(tmp2, tmp2);  // Zero-extend word result to 64 bits.
  }

  z_release();               // Release barrier before the serializing store.
  z_st(Z_R0, 0, tmp2, tmp1); // Store at page base (tmp1) + per-thread offset (tmp2).
}
2707
// Touch (test) the stack at Z_SP - offset to provoke a fault if that page
// is protected, implementing stack-overflow banging.
// Don't rely on register locking, always use Z_R1 as scratch register instead.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");
  if (Displacement::is_validDisp(-offset)) {
    // Offset fits into the instruction's displacement field: test directly off Z_SP.
    z_tmy(-offset, Z_SP, mask_stackbang);
  } else {
    // Displacement too large: materialize the address in Z_R1 first.
    add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!!
    z_tm(0, Z_R1, mask_stackbang); // Just banging.
  }
}
2719
2720 void MacroAssembler::reserved_stack_check(Register return_pc) {
2721 // Test if reserved zone needs to be enabled.
2722 Label no_reserved_zone_enabling;
2723 assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2724 BLOCK_COMMENT("reserved_stack_check {");
2725
2726 z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2727 z_brl(no_reserved_zone_enabling);
6433 }
6434
// Emit the TRTO instruction, retrying while the CPU reports partial
// completion (CC==3). r1/r1+1 must be an even/odd register pair holding
// destination address and source length; only bit 0 of m3 may be set.
void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trto(r1, r2, m3);
  // The instruction may stop before all data is processed; iterate until done.
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}
6444
// Emit the TRTT instruction, retrying while the CPU reports partial
// completion (CC==3). r1/r1+1 must be an even/odd register pair holding
// destination address and source length; only bit 0 of m3 may be set.
void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trtt(r1, r2, m3);
  // The instruction may stop before all data is processed; iterate until done.
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}
6454
// Emit a check of the global safepoint state: branch to slow_path if the
// state is not _not_synchronized. scratch defaults to Z_R1 when noreg is
// passed. may_relocate selects whether a relocation record may be emitted
// for the address of the state word.
void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) {
  if (scratch == noreg) scratch = Z_R1;
  address Astate = SafepointSynchronize::address_of_state();
  BLOCK_COMMENT("safepoint check:");

  if (may_relocate) {
    ptrdiff_t total_distance = Astate - this->pc();
    if (RelAddr::is_in_range_of_RelAddr32(total_distance)) {
      // State word is pc-relative reachable: emit relocation info and use
      // a relative address load.
      RelocationHolder rspec = external_word_Relocation::spec(Astate);
      (this)->relocate(rspec, relocInfo::pcrel_addr_format);
      load_absolute_address(scratch, Astate);
    } else {
      // Out of 32-bit relative range: materialize the address as a constant.
      load_const_optimized(scratch, Astate);
    }
  } else {
    load_absolute_address(scratch, Astate);
  }
  // Compare the last byte of the 4-byte state word (big endian) against
  // _not_synchronized; any other value means a safepoint is pending.
  z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized);
  z_brne(slow_path);
}
6475
6476
6477 void MacroAssembler::generate_type_profiling(const Register Rdata,
6478 const Register Rreceiver_klass,
6479 const Register Rwanted_receiver_klass,
6480 const Register Rmatching_row,
6481 bool is_virtual_call) {
6482 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
6483 in_bytes(ReceiverTypeData::receiver_offset(0));
6484 const int num_rows = ReceiverTypeData::row_limit();
6485 NearLabel found_free_row;
6486 NearLabel do_increment;
6487 NearLabel found_no_slot;
6488
6489 BLOCK_COMMENT("type profiling {");
6490
6491 // search for:
6492 // a) The type given in Rwanted_receiver_klass.
6493 // b) The *first* empty row.
|
26 #include "precompiled.hpp"
27 #include "asm/codeBuffer.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "gc/shared/cardTableModRefBS.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "opto/compile.hpp"
37 #include "opto/intrinsicnode.hpp"
38 #include "opto/matcher.hpp"
39 #include "prims/methodHandles.hpp"
40 #include "registerSaver_s390.hpp"
41 #include "runtime/biasedLocking.hpp"
42 #include "runtime/icache.hpp"
43 #include "runtime/interfaceSupport.hpp"
44 #include "runtime/objectMonitor.hpp"
45 #include "runtime/os.hpp"
46 #include "runtime/safepoint.hpp"
47 #include "runtime/safepointMechanism.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/stubRoutines.hpp"
50 #include "utilities/events.hpp"
51 #include "utilities/macros.hpp"
52 #if INCLUDE_ALL_GCS
53 #include "gc/g1/g1CollectedHeap.inline.hpp"
54 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
55 #include "gc/g1/heapRegion.hpp"
56 #endif
57
58 #include <ucontext.h>
59
60 #define BLOCK_COMMENT(str) block_comment(str)
61 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
62
63 // Move 32-bit register if destination and source are different.
64 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
65 if (rs != rd) { z_lr(rd, rs); }
66 }
67
2004 }
2005
2006 guarantee(false, "not a pcrelative instruction to patch!");
2007 }
2008
2009 // "Current PC" here means the address just behind the basr instruction.
2010 address MacroAssembler::get_PC(Register result) {
2011 z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2012 return pc();
2013 }
2014
// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
// Materializes that address in 'result' at run time and returns the
// corresponding assembly-time address to the caller.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // LARL takes the offset in halfwords; save target instruction address in result.
  return here + offset;
}
2023
// Compute the length in bytes (2, 4, or 6) of the instruction at 'pc' into
// 'size'. The two most significant bits of the first instruction byte
// determine the length.
void MacroAssembler::instr_size(Register size, Register pc) {
  // Extract 2 most significant bits of current instruction.
  z_llgc(size, Address(pc));
  z_srl(size, 6);
  // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
  z_ahi(size, 3);
  z_nill(size, 6);
}
2032
// Resize_frame with SP(new) = SP(old) - [offset].
// Grows the frame by 'offset' bytes (register operand). If load_fp is set,
// the frame pointer is first fetched from the callers_sp slot of the current
// frame; it is then stored into the callers_sp slot of the resized frame to
// re-establish the back link.
void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
{
  assert_different_registers(offset, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } // Fetch back link before Z_SP moves.

  z_sgr(Z_SP, offset);                 // Z_SP -= offset.
  z_stg(fp, _z_abi(callers_sp), Z_SP); // Store back link into the new frame.
}
2042
2043 // Resize_frame with SP(new) = [newSP] + offset.
2044 // This emitter is useful if we already have calculated a pointer
2045 // into the to-be-allocated stack space, e.g. with special alignment properties,
2046 // but need some additional space, e.g. for spilling.
2047 // newSP is the pre-calculated pointer. It must not be modified.
2048 // fp holds, or is filled with, the frame pointer.
2049 // offset is the additional increment which is added to addr to form the new SP.
2050 // Note: specify a negative value to reserve more space!
2051 // load_fp == true only indicates that fp is not pre-filled with the frame pointer.
2052 // It does not guarantee that fp contains the frame pointer at the end.
2699 // offset to write to within the page. This minimizes bus traffic
2700 // due to cache line collision.
// Serialize memory via a store into the global memory serialization page.
// The store offset within the page is derived from the thread pointer, so
// different threads hit different offsets (see leading comment: minimizes
// bus traffic due to cache line collision).
// tmp1 and tmp2 are scratch; thread may alias neither of them.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  assert_different_registers(tmp1, tmp2);
  z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); // Per-thread raw offset.
  load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); // Page base address.

  int mask = os::get_serialize_page_mask();
  if (Immediate::is_uimm16(mask)) {
    z_nill(tmp2, mask);   // Mask fits in 16 bits: AND low halfword.
    z_llghr(tmp2, tmp2);  // Zero-extend halfword result to 64 bits.
  } else {
    z_nilf(tmp2, mask);   // 32-bit mask: AND low word.
    z_llgfr(tmp2, tmp2);  // Zero-extend word result to 64 bits.
  }

  z_release();               // Release barrier before the serializing store.
  z_st(Z_R0, 0, tmp2, tmp1); // Store at page base (tmp1) + per-thread offset (tmp2).
}
2718
// Emit a safepoint poll: branch to slow_path when a safepoint is pending.
// With thread-local polling, tests the poll bit in the per-thread poll word;
// otherwise compares the global safepoint state, using temp_reg as scratch.
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    // Poll byte is the last byte of the 8-byte poll word (big endian).
    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
    // Armed page has poll_bit set.
    z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
    z_brnaz(slow_path); // Branch if the tested bit is set (poll is armed).
  } else {
    // Global polling: compare the last byte of the 4-byte state word
    // (big endian) against _not_synchronized.
    load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
    z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
    z_brne(slow_path);
  }
}
2731
// Touch (test) the stack at Z_SP - offset to provoke a fault if that page
// is protected, implementing stack-overflow banging.
// Don't rely on register locking, always use Z_R1 as scratch register instead.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");
  if (Displacement::is_validDisp(-offset)) {
    // Offset fits into the instruction's displacement field: test directly off Z_SP.
    z_tmy(-offset, Z_SP, mask_stackbang);
  } else {
    // Displacement too large: materialize the address in Z_R1 first.
    add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!!
    z_tm(0, Z_R1, mask_stackbang); // Just banging.
  }
}
2743
2744 void MacroAssembler::reserved_stack_check(Register return_pc) {
2745 // Test if reserved zone needs to be enabled.
2746 Label no_reserved_zone_enabling;
2747 assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2748 BLOCK_COMMENT("reserved_stack_check {");
2749
2750 z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2751 z_brl(no_reserved_zone_enabling);
6457 }
6458
// Emit the TRTO instruction, retrying while the CPU reports partial
// completion (CC==3). r1/r1+1 must be an even/odd register pair holding
// destination address and source length; only bit 0 of m3 may be set.
void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trto(r1, r2, m3);
  // The instruction may stop before all data is processed; iterate until done.
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}
6468
// Emit the TRTT instruction, retrying while the CPU reports partial
// completion (CC==3). r1/r1+1 must be an even/odd register pair holding
// destination address and source length; only bit 0 of m3 may be set.
void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trtt(r1, r2, m3);
  // The instruction may stop before all data is processed; iterate until done.
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}
6478
6479
6480 void MacroAssembler::generate_type_profiling(const Register Rdata,
6481 const Register Rreceiver_klass,
6482 const Register Rwanted_receiver_klass,
6483 const Register Rmatching_row,
6484 bool is_virtual_call) {
6485 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
6486 in_bytes(ReceiverTypeData::receiver_offset(0));
6487 const int num_rows = ReceiverTypeData::row_limit();
6488 NearLabel found_free_row;
6489 NearLabel do_increment;
6490 NearLabel found_no_slot;
6491
6492 BLOCK_COMMENT("type profiling {");
6493
6494 // search for:
6495 // a) The type given in Rwanted_receiver_klass.
6496 // b) The *first* empty row.
|