1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/globals.hpp"

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Emits the post-barrier for a bulk oop-array store: dirties every card
// covering the heap range [addr, addr + count * BytesPerHeapOop).
//   addr  - start address of the stored oop range (clobbered)
//   count - number of oops stored, 32-bit (clobbered)
//   tmp   - scratch register (clobbered; ends up holding the zero register)
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                    Register addr, Register count, Register tmp) {
  BLOCK_COMMENT("CardTablePostBarrier");
  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

  Label L_cardtable_loop, L_done;

  __ cbz_32(count, L_done); // zero count - nothing to do

  // count := address of the last oop stored in the range.
  __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
  __ sub(count, count, BytesPerHeapOop); // last addr

  // Convert the first and last addresses into card-table indices.
  __ logical_shift_right(addr, addr, CardTable::card_shift);
  __ logical_shift_right(count, count, CardTable::card_shift);
  __ sub(count, count, addr); // nb of cards

  // warning: Rthread has not been preserved
  __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
  __ add(addr, tmp, addr); // addr := address of the first card byte

  Register zero = __ zero_register(tmp);

  // Store 0 (the dirty card value, see assert in store_check_part2) into each
  // card byte from first to last inclusive. The loop decrements count and
  // branches on 'ge', so it executes (number of cards) iterations: the
  // earlier subtraction produced last_index - first_index, i.e. cards - 1.
  __ BIND(L_cardtable_loop);
  __ strb(zero, Address(addr, 1, post_indexed));
  __ subs(count, count, 1);
  __ b(L_cardtable_loop, ge);
  __ BIND(L_done);
}

// Emits an oop store to 'obj' followed by the card-table store check.
// Null stores (is_null) need no card mark and delegate straight to the base
// store. For precise barriers (IS_ARRAY or ON_UNKNOWN_OOP_REF decorators)
// the caller must have pre-computed the exact destination address in
// obj.base() (asserted below); otherwise the card of obj.base() — the object
// start for field stores — is dirtied after the store.
//   tmp1 - used as the card_table_base register across the two check parts
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;

  if (is_null) {
    BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, true);
  } else {
    assert (!precise || (obj.index() == noreg && obj.disp() == 0),
            "store check address should be calculated beforehand");

    store_check_part1(masm, tmp1);
    BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, false);
    new_val = noreg; // new_val is dead after the store; make that explicit
    store_check_part2(masm, obj.base(), tmp1, tmp2);
  }
}

// The 1st part of the store check.
// Sets card_table_base register.
void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Register card_table_base) {
  // Check barrier set type (should be card table) and element size
  BarrierSet* bs = BarrierSet::barrier_set();
  assert(bs->kind() == BarrierSet::CardTableBarrierSet,
         "Wrong barrier set kind");

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");

  // Load card table base address.

  /* Performance note.

     There is an alternative way of loading card table base address
     from thread descriptor, which may look more efficient:

     ldr(card_table_base, Address(Rthread, JavaThread::card_table_base_offset()));

     However, performance measurements of micro benchmarks and specJVM98
     showed that loading of card table base from thread descriptor is
     7-18% slower compared to loading of literal embedded into the code.
     Possible cause is a cache miss (card table base address resides in a
     rarely accessed area of thread descriptor).
  */
  // TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
  __ mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
}

// The 2nd part of the store check: computes the card address for 'obj' and
// dirties that card.
//   obj             - register holding the address whose card is marked
//   card_table_base - loaded by store_check_part1 (clobbered on AARCH64)
//   tmp             - scratch, used for the conditional-mark load / zero value
void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Register obj, Register card_table_base, Register tmp) {
  assert_different_registers(obj, card_table_base, tmp);

  BarrierSet* bs = BarrierSet::barrier_set();
  assert(bs->kind() == BarrierSet::CardTableBarrierSet,
         "Wrong barrier set kind");

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");

  assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
#ifdef AARCH64
  // NOTE(review): unqualified 'add' — this class is not a MacroAssembler, so
  // this presumably needs the '__' (masm->) prefix; confirm against an
  // AARCH64 build of this port.
  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
  Address card_table_addr(card_table_base);
#else
  // ARM32 addressing mode can fold the shift: card byte at
  // card_table_base + (obj >> card_shift).
  Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
#endif

  if (UseCondCardMark) {
    if (ct->scanned_concurrently()) {
      // StoreLoad: order the preceding oop store before the card load below.
      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
    }
    Label already_dirty;

    // Skip the card write if the card is already dirty (value 0, see assert
    // above) — avoids write traffic on hot cards.
    __ ldrb(tmp, card_table_addr);
    __ cbz(tmp, already_dirty);

    set_card(masm, card_table_base, card_table_addr, tmp);
    __ bind(already_dirty);

  } else {
    if (ct->scanned_concurrently()) {
      // StoreStore: order the preceding oop store before the card write.
      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
    }
    set_card(masm, card_table_base, card_table_addr, tmp);
  }
}

// Stores the dirty card value (0) at card_table_addr.
void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
#ifdef AARCH64
  // NOTE(review): unqualified 'strb' — presumably needs the '__' (masm->)
  // prefix, as above; confirm against an AARCH64 build.
  strb(ZR, card_table_addr);
#else
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  CardTable* ct = ctbs->card_table();
  if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
    // Card table is aligned so the lowest byte of the table address base is zero.
    // This works only if the code is not saved for later use, possibly
    // in a context where the base would no longer be aligned.
    // (strb stores only the low 8 bits of card_table_base, which are 0 ==
    // dirty_card_val, saving the 'mov tmp, 0' below.)
    __ strb(card_table_base, card_table_addr);
  } else {
    __ mov(tmp, 0);
    __ strb(tmp, card_table_addr);
  }
#endif // AARCH64
}