/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"

// Set the deferred bit on the card at card_index. Returns false if the card
// already had the deferred bit set (and is not clean), true otherwise. The
// bit is installed with a CAS on either a clean card or a claimed card.
bool G1CardTable::mark_card_deferred(size_t card_index) {
  CardValue val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  CardValue new_val = val;
  if (val == clean_card_val()) {
    new_val = deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

// Set all cards covering mr to the dedicated young-gen value.
void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
  CardValue *const first = byte_for(mr.start());
  CardValue *const last = byte_after(mr.last());

  memset_with_concurrent_readers(first, g1_young_gen, last - first);
}

#ifndef PRODUCT
void G1CardTable::verify_g1_young_region(MemRegion mr) {
  // G1FastWriteBarrier does not use the value G1CardTable::g1_young_gen
  // for cards. Cards for a young region can be dirty or clean, and they are
  // never scanned. So there is nothing to verify.
  if (!G1FastWriteBarrier) {
    verify_region(mr, g1_young_gen, true);
  }
}

void G1CardTable::verfiy_claimed_dirty_region(MemRegion mr) {
  // _ct->print_content_for_mr(mr, tty);
  // All cards should be either "claimed" or "claimed and deferred".
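  // A "claimed" card is a dirty card with the claimed bit set; a "deferred"
  // card additionally has the deferred bit set. Any other value in this
  // region is logged below and trips the guarantee at the end of the loop.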
  const CardValue claimed_val = dirty_card_val() | claimed_card_val();
  const CardValue deferred_val = claimed_val | deferred_card_val();

  CardValue* start = byte_for(mr.start());
  CardValue* end = byte_for(mr.last());

  bool failures = false;
  for (CardValue* curr = start; curr <= end; ++curr) {
    CardValue curr_val = *curr;
    bool failed = (curr_val != claimed_val && curr_val != deferred_val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                              p2i(start), p2i(end));
        log_error(gc, verify)("== expecting value: %d or %d", claimed_val, deferred_val);
        failures = true;
      }
      log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should be no failure");
}
#endif

void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // Default value for a clean card on the card table is -1. So we cannot
  // take advantage of the zero_filled parameter.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
  _card_table->clear(mr);
}

// Set up the card table over the reserved backing storage provided by the
// mapper and compute _byte_map_base, the biased base used to translate heap
// addresses to card addresses.
void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
  mapper->set_mapping_changed_listener(&_listener);

  _byte_map_size = mapper->reserved().byte_size();

  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 1;
  _covered[0] = _whole_heap;

  _byte_map = (CardValue*) mapper->reserved().start();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  log_trace(gc, barrier)("G1CardTable::G1CardTable: ");
  log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

bool G1CardTable::is_in_young(oop obj) const {
  if (G1FastWriteBarrier) {
    return G1CollectedHeap::heap()->heap_region_containing(obj)->is_young();
  } else {
    volatile CardValue* p = byte_for(obj);
    return *p == G1CardTable::g1_young_card_val();
  }
}