/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified since the last
// enumeration.
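//
// A minimal sketch of the mechanism (illustrative only; the interpreter
// and compilers emit their own inlined versions of this fast path): each
// card_size-byte span of the heap maps to one byte in _byte_map, and
// byte_map_base is biased so that a raw heap address shifted right by
// card_shift indexes the correct byte. A post-write barrier for a store
// into `field` then reduces to roughly:
//
//   jbyte* card = byte_map_base + (uintptr_t(field) >> card_shift);
//   *card = dirty_card;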

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
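
// Worked example (illustrative, not computed by the code above): with
// 512-byte cards, a 1 GB heap covers 2^30 / 2^9 = 2^21 cards, so
// _guard_index + 1 is about two million byte-map entries (~2 MB), which
// is then rounded up to the larger of the page size and the VM
// allocation granularity.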

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  _defer_initial_card_mark(false)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  initialize_deferred_card_mark_barriers();
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}
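
// Sample of the format printed above (addresses illustrative):
//   Card table byte_map: [0x...,0x...] byte_map_base: 0x...
// byte_map_base is reported separately because it is a biased pointer
// and need not lie inside the [_byte_map, _byte_map + _byte_map_size)
// range.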

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the MemRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
  if (!ReduceInitialCardMarks) {
    return;
  }
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_card_mark_barrier(thread);
  if (new_obj->is_typeArray() || is_in_young(new_obj)) {
    // Arrays of non-references don't need a post-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      write_region(mr);
    }
  }
}
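
// Note on the flow above (summary, not new behavior): a region recorded
// via thread->set_deferred_card_mark(mr) is card-marked later by
// flush_deferred_card_mark_barrier(), either at the top of this thread's
// next slow-path allocation exit or through flush_deferred_barriers()
// before a GC consults the card table, by which point the object is
// fully initialized.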

void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(!is_in_young(old_obj),
             "Else should have been filtered in on_slowpath_allocation_exit()");
      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
#else
  assert(!_defer_initial_card_mark, "Should be false");
  assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
}

void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) {
  // The deferred store barriers must all have been flushed to the
  // card-table (or other remembered set structure) before GC starts
  // processing the card-table (or other remembered set).
  flush_deferred_card_mark_barrier(thread);
}