/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BSCodeGen.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/c1G1BSCodeGen.hpp"
#include "gc/g1/c2G1BSCodeGen.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

// Set of all SATB queues.
static SATBMarkQueueSet _satb_mark_queue_set;

// Set of all dirty card queues.
static DirtyCardQueueSet _dirty_card_queue_set;

// SATB marking queue support
SATBMarkQueueSet& G1BarrierSet::satb_mark_queue_set() {
  return _satb_mark_queue_set;
}

// Dirty card queue support
DirtyCardQueueSet& G1BarrierSet::dirty_card_queue_set() {
  return _dirty_card_queue_set;
}

G1BarrierSet::G1BarrierSet(CardTable* card_table) :
  CardTableModRefBS(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)) { }

void G1BarrierSet::satb_enqueue(oop pre_val) {
  // Nulls should have been already filtered.
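  // Record the previous value of the field so that concurrent marking
  // preserves its snapshot-at-the-beginning view of the object graph.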
  assert(pre_val->is_oop(true), "Error");

  if (!_satb_mark_queue_set.is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(pre_val);
  }
}

void G1BarrierSet::card_enqueue(volatile jbyte* card) {
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->dirty_card_queue().enqueue(card);
  } else {
    MutexLockerEx x(Shared_DirtyCardQ_lock, Mutex::_no_safepoint_check_flag);
    dirty_card_queue_set().shared_dirty_card_queue()->enqueue(card);
  }
}

BarrierSetCodeGen* G1BarrierSet::make_code_gen() {
  return new G1BSCodeGen();
}

C1BarrierSetCodeGen* G1BarrierSet::make_c1_code_gen() {
  return new C1G1BSCodeGen();
}

C2BarrierSetCodeGen* G1BarrierSet::make_c2_code_gen() {
  return new C2G1BSCodeGen();
}

template <class T> void G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
  if (!_satb_mark_queue_set.is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      satb_enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

void G1BarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void G1BarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void G1BarrierSet::invalidate(MemRegion mr) {
  volatile jbyte* byte = _card_table->byte_for(mr.start());
  jbyte* last_byte = _card_table->byte_for(mr.last());
  Thread* thr = Thread::current();
  // skip all consecutive young cards
  for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
  if (byte <= last_byte) {
    OrderAccess::storeload();
    // Enqueue if necessary.
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      for (; byte <= last_byte; byte++) {
        if (*byte == G1CardTable::g1_young_card_val()) {
          continue;
        }
        if (*byte != G1CardTable::dirty_card_val()) {
          *byte = G1CardTable::dirty_card_val();
          jt->dirty_card_queue().enqueue(byte);
        }
      }
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock, Mutex::_no_safepoint_check_flag);
      for (; byte <= last_byte; byte++) {
        if (*byte == G1CardTable::g1_young_card_val()) {
          continue;
        }
        if (*byte != G1CardTable::dirty_card_val()) {
          *byte = G1CardTable::dirty_card_val();
          _dirty_card_queue_set.shared_dirty_card_queue()->enqueue(byte);
        }
      }
    }
  }
}

void G1BarrierSet::write_ref_nmethod_post(nmethod* nm, oop obj) {
  if (obj != NULL) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapRegion* hr = g1h->heap_region_containing(obj);
    hr->add_strong_code_root(nm);
  }
}

class G1EnsureLastRefToRegion : public OopClosure {
  G1CollectedHeap* _g1h;
  HeapRegion* _hr;
  oop* _dst;
  bool _value;
public:
  G1EnsureLastRefToRegion(G1CollectedHeap* g1h, HeapRegion* hr, oop* dst) :
    _g1h(g1h), _hr(hr), _dst(dst), _value(true) {}

  void do_oop(oop* p) {
    if (_value && p != _dst) {
      oop obj = oopDesc::load_heap_oop(p);
      if (obj != NULL) {
        HeapRegion* hr = _g1h->heap_region_containing(obj);
        if (hr == _hr) {
          // Another reference to the same region.
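          // The nmethod still has other oops pointing into this region, so it
          // must remain registered as a strong code root for it.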
          _value = false;
        }
      }
    }
  }

  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  bool value() const { return _value; }
};

void G1BarrierSet::write_ref_nmethod_pre(nmethod* nm, ptrdiff_t offset) {
  oop* dst = (oop*)(intptr_t(nm) + offset);
  oop obj = oopDesc::load_heap_oop(dst);
  if (obj != NULL) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapRegion* hr = g1h->heap_region_containing(obj);
    G1EnsureLastRefToRegion ensure_last_ref(g1h, hr, dst);
    nm->oops_do(&ensure_last_ref);
    if (ensure_last_ref.value()) {
      // Last reference to this region, remove the nmethod from the rset.
      hr->remove_strong_code_root(nm);
    }
  }
}

void G1BarrierSet::on_add_thread(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "we should not be at a safepoint");

  SATBMarkQueue& satb_queue = thread->satb_mark_queue();
  SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
  // The SATB queue should have been constructed with its active
  // field set to false.
  assert(!satb_queue.is_active(), "SATB queue should not be active");
  assert(satb_queue.is_empty(), "SATB queue should be empty");
  // If we are creating the thread during a marking cycle, we should
  // set the active field of the SATB queue to true.
  if (satb_queue_set.is_active()) {
    satb_queue.set_active(true);
  }

  DirtyCardQueue& dirty_queue = thread->dirty_card_queue();
  // The dirty card queue should have been constructed with its
  // active field set to true.
  assert(dirty_queue.is_active(), "dirty card queue should be active");
}

void G1BarrierSet::on_destroy_thread(JavaThread* thread) {
  CardTableModRefBS::on_destroy_thread(thread);
  thread->satb_mark_queue().flush();
  thread->dirty_card_queue().flush();
}

bool G1BarrierSet::is_referent_field(oop base, ptrdiff_t offset) {
  Klass* k;
  return offset == java_lang_ref_Reference::referent_offset &&
         (k = base->klass()) &&
         InstanceKlass::cast(k)->reference_type() != REF_NONE;
}

// G1 write-barrier pre: executed before a pointer store.
JRT_LEAF(void, G1BarrierSet::g1_wb_pre(oopDesc* orig, JavaThread* thread))
  if (orig == NULL) {
    assert(false, "should be optimized out");
    return;
  }
  assert(orig->is_oop(true /* ignore mark word */), "Error");
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END

// G1 write-barrier post: executed after a pointer store.
JRT_LEAF(void, G1BarrierSet::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END