0 /*
1 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
|
0 /*
1 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
|
40 // considerations into account.
41
42 class CardTableModRefBS: public ModRefBarrierSet {
43 // Some classes get to look at some private stuff.
44 friend class VMStructs;
45 protected:
46
// Distinguished values stored in card-table entries. clean_card is
// all-ones (-1 as a byte); every other value here is < 32, so
// clean_card_mask (clean_card with the low five bits cleared) has
// zero bits wherever any non-clean value could set bits.
47 enum CardValues {
48 clean_card = -1,
49 // The mask contains zeros in places for all other values.
50 clean_card_mask = clean_card - 31,
51
52 dirty_card = 0,
53 precleaned_card = 1,
54 claimed_card = 2,
55 deferred_card = 4,
56 last_card = 8,
57 CT_MR_BS_last_reserved = 16
58 };
59
60 // a word's worth (row) of clean card values
61 static const intptr_t clean_card_row = (intptr_t)(-1);
62
63 // The declaration order of these const fields is important; see the
64 // constructor before changing.
65 const MemRegion _whole_heap; // the region covered by the card table
66 size_t _guard_index; // index of very last element in the card
67 // table; it is set to a guard value
68 // (last_card) and should never be modified
69 size_t _last_valid_index; // index of the last valid element
70 const size_t _page_size; // page size used when mapping _byte_map
71 size_t _byte_map_size; // in bytes
72 jbyte* _byte_map; // the card marking array
73
74 // Some barrier sets create tables whose elements correspond to parts of
75 // the heap; the CardTableModRefBS is an example. Such barrier sets will
76 // normally reserve space for such tables, and commit parts of the table
77 // "covering" parts of the heap that are committed. At most one covered
78 // region per generation is needed.
|
40 // considerations into account.
41
42 class CardTableModRefBS: public ModRefBarrierSet {
43 // Some classes get to look at some private stuff.
44 friend class VMStructs;
45 protected:
46
// Distinguished values stored in card-table entries. clean_card is
// all-ones (-1 as a byte); every other value here is < 32, so
// clean_card_mask (clean_card with the low five bits cleared) has
// zero bits wherever any non-clean value could set bits.
47 enum CardValues {
48 clean_card = -1,
49 // The mask contains zeros in places for all other values.
50 clean_card_mask = clean_card - 31,
51
52 dirty_card = 0,
53 precleaned_card = 1,
54 claimed_card = 2,
55 deferred_card = 4,
56 last_card = 8,
57 CT_MR_BS_last_reserved = 16
58 };
59
60 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
61 // or INCLUDE_JVMCI is being used
62 bool _defer_initial_card_mark;
63
64 // a word's worth (row) of clean card values
65 static const intptr_t clean_card_row = (intptr_t)(-1);
66
67 // The declaration order of these const fields is important; see the
68 // constructor before changing.
69 const MemRegion _whole_heap; // the region covered by the card table
70 size_t _guard_index; // index of very last element in the card
71 // table; it is set to a guard value
72 // (last_card) and should never be modified
73 size_t _last_valid_index; // index of the last valid element
74 const size_t _page_size; // page size used when mapping _byte_map
75 size_t _byte_map_size; // in bytes
76 jbyte* _byte_map; // the card marking array
77
78 // Some barrier sets create tables whose elements correspond to parts of
79 // the heap; the CardTableModRefBS is an example. Such barrier sets will
80 // normally reserve space for such tables, and commit parts of the table
81 // "covering" parts of the heap that are committed. At most one covered
82 // region per generation is needed.
|
// Accessors exposing the raw CardValues enumerators as ints, so card
// scanning/preclean/claim code can compare table entries against them.
162 static int claimed_card_val() { return claimed_card; }
163 static int precleaned_card_val() { return precleaned_card; }
164 static int deferred_card_val() { return deferred_card; }
165
166 virtual void initialize();
167
168 // *** Barrier set functions.
169
170 // Initialization utilities; covered_words is the size of the covered region
171 // in, um, words.
// Number of card-table entries needed to cover covered_words heap
// words: one card per card_size_in_words words (rounded up), plus one
// extra guard card used to detect errors.
172 inline size_t cards_required(size_t covered_words) {
173 // Add one for a guard card, used to detect errors.
174 const size_t words = align_up(covered_words, card_size_in_words);
175 return words / card_size_in_words + 1;
176 }
177
178 protected:
179 CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
180 ~CardTableModRefBS();
181
182 protected:
// Region write barrier: dirty every card covered by mr.
183 void write_region_work(MemRegion mr) {
184 dirty_MemRegion(mr);
185 }
186
187 protected:
// Reference-array write barrier: imprecise — dirties all cards
// covered by the array region mr rather than individual slots.
188 void write_ref_array_work(MemRegion mr) {
189 dirty_MemRegion(mr);
190 }
191
192 public:
// True iff addr lies on a card boundary (delegates to is_card_aligned).
193 bool is_aligned(HeapWord* addr) {
194 return is_card_aligned(addr);
195 }
196
197 // *** Card-table-barrier-specific things.
198
199 // Record a reference update. Note that these versions are precise!
200 // The scanning code has to handle the fact that the write barrier may be
201 // either precise or imprecise. We make non-virtual inline variants of
202 // these functions here for performance.
|
// Accessors exposing the raw CardValues enumerators as ints, so card
// scanning/preclean/claim code can compare table entries against them.
166 static int claimed_card_val() { return claimed_card; }
167 static int precleaned_card_val() { return precleaned_card; }
168 static int deferred_card_val() { return deferred_card; }
169
170 virtual void initialize();
171
172 // *** Barrier set functions.
173
174 // Initialization utilities; covered_words is the size of the covered region
175 // in, um, words.
// Number of card-table entries needed to cover covered_words heap
// words: one card per card_size_in_words words (rounded up), plus one
// extra guard card used to detect errors.
176 inline size_t cards_required(size_t covered_words) {
177 // Add one for a guard card, used to detect errors.
178 const size_t words = align_up(covered_words, card_size_in_words);
179 return words / card_size_in_words + 1;
180 }
181
182 protected:
183 CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
184 ~CardTableModRefBS();
185
186 public:
// Region write barrier (public in this version, replacing the
// protected write_region_work hook): dirty every card covered by mr.
187 void write_region(MemRegion mr) {
188 dirty_MemRegion(mr);
189 }
190
191 protected:
// Reference-array write barrier: imprecise — dirties all cards
// covered by the array region mr rather than individual slots.
192 void write_ref_array_work(MemRegion mr) {
193 dirty_MemRegion(mr);
194 }
195
196 public:
// True iff addr lies on a card boundary (delegates to is_card_aligned).
197 bool is_aligned(HeapWord* addr) {
198 return is_card_aligned(addr);
199 }
200
201 // *** Card-table-barrier-specific things.
202
203 // Record a reference update. Note that these versions are precise!
204 // The scanning code has to handle the fact that the write barrier may be
205 // either precise or imprecise. We make non-virtual inline variants of
206 // these functions here for performance.
|
295 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
296 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
297 return byte_for(p) - _byte_map;
298 }
299
// Address of the card-table entry for card_index — the inverse of the
// index_for(p) computation above (which returns byte_for(p) - _byte_map).
300 const jbyte* byte_for_index(const size_t card_index) const {
301 return _byte_map + card_index;
302 }
303
304 // Print a description of the memory for the barrier set
305 virtual void print_on(outputStream* st) const;
306
307 void verify();
308 void verify_guard();
309
310 // val_equals -> it will check that all cards covered by mr equal val
311 // !val_equals -> it will check that all cards covered by mr do not equal val
312 void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
313 void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
314 void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
315
316 template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
317 class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
318 };
319
// Maps the CardTableModRefBS type to its BarrierSet::Name enumerator
// for the barrier-set runtime type identification machinery.
320 template<>
321 struct BarrierSet::GetName<CardTableModRefBS> {
322 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
323 };
324
// Inverse mapping: from the BarrierSet::CardTableModRef enumerator
// back to the concrete CardTableModRefBS type.
325 template<>
326 struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
327 typedef CardTableModRefBS type;
328 };
329
330 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
|
299 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
300 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
301 return byte_for(p) - _byte_map;
302 }
303
// Address of the card-table entry for card_index — the inverse of the
// index_for(p) computation above (which returns byte_for(p) - _byte_map).
304 const jbyte* byte_for_index(const size_t card_index) const {
305 return _byte_map + card_index;
306 }
307
308 // Print a description of the memory for the barrier set
309 virtual void print_on(outputStream* st) const;
310
311 void verify();
312 void verify_guard();
313
314 // val_equals -> it will check that all cards covered by mr equal val
315 // !val_equals -> it will check that all cards covered by mr do not equal val
316 void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
317 void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
318 void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
319
320 // ReduceInitialCardMarks
321 void initialize_deferred_card_mark_barriers();
322
323 // If the CollectedHeap was asked to defer a store barrier above,
324 // this informs it to flush such a deferred store barrier to the
325 // remembered set.
326 void flush_deferred_card_mark_barrier(JavaThread* thread);
327
328 // Can a compiler initialize a new object without store barriers?
329 // This permission only extends from the creation of a new object
330 // via a TLAB up to the first subsequent safepoint. If such permission
331 // is granted for this heap type, the compiler promises to call
332 // defer_store_barrier() below on any slow path allocation of
333 // a new object for which such initializing store barriers will
334 // have been elided. G1, like CMS, allows this, but should be
335 // ready to provide a compensating write barrier as necessary
336 // if that storage came out of a non-young region. The efficiency
337 // of this implementation depends crucially on being able to
338 // answer very efficiently in constant time whether a piece of
339 // storage in the heap comes from a young region or not.
340 // See ReduceInitialCardMarks.
// Card-table barrier sets always grant this permission; the
// compensating deferred card mark (see
// flush_deferred_card_mark_barrier above) covers slow-path
// allocations whose initializing-store barriers were elided.
341 virtual bool can_elide_tlab_store_barriers() const {
342 return true;
343 }
344
345 // If a compiler is eliding store barriers for TLAB-allocated objects,
346 // we will be informed of a slow-path allocation by a call
347 // to on_slowpath_allocation_exit() below. Such a call precedes the
348 // initialization of the object itself, and no post-store-barriers will
349 // be issued. Some heap types require that the barrier strictly follows
350 // the initializing stores. (This is currently implemented by deferring the
351 // barrier until the next slow-path allocation or gc-related safepoint.)
352 // This interface answers whether a particular barrier type needs the card
353 // mark to be thus strictly sequenced after the stores.
354 virtual bool card_mark_must_follow_store() const = 0;
355
356 virtual bool is_in_young(oop obj) const = 0;
357
358 virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
359 virtual void flush_deferred_barriers(JavaThread* thread);
360
// Flush any deferred card mark pending for this thread before its
// heap state is made parsable.
361 virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
362
363 template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
364 class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
365 };
366
// Maps the CardTableModRefBS type to its BarrierSet::Name enumerator
// for the barrier-set runtime type identification machinery.
367 template<>
368 struct BarrierSet::GetName<CardTableModRefBS> {
369 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
370 };
371
// Inverse mapping: from the BarrierSet::CardTableModRef enumerator
// back to the concrete CardTableModRefBS type.
372 template<>
373 struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
374 typedef CardTableModRefBS type;
375 };
376
377 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
|